repo_name stringlengths 6-130 | hexsha sequence | file_path sequence | code sequence | apis sequence | possible_versions list
---|---|---|---|---|---
lefthandedroo/Cosmodels | [
"c355d18021467cf92546cf2fc9cb1d1abe59b8d8"
] | [
"History/Stats/emcee_ex_4_merging.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nchose m, c (parameters) for a straight line\nfrom the line pick N points (N=3, 5, 50, 100, 1000)\npick sigma (size of the noise)\nrandomly deviate(offset) points in y direction by using \nsigma*random number from normal distribution\nsigma the same for all points\nthen define the likelihood use likelihood for dataset \nwith gaussian error\nLookup how to write the eqution for a likelihood\n(and then use log likelihood)\nplug into emcee\ndraw a plot of c vs m displaying the walkers' walk\nproduce marginalised distribution - historgram \nfor every m and c found - plot them together \nplot data and error bars, \nplot what the actual model is\nfind max likelihood\nand m and b corresponding to max L\ndraw the line that they give\ntry for different sigmas\nmodify to find parameters with max posterior distribution\nuse those to plot the best line\nincrease number of dimensions, think of curve that requires 4-5 parameters\n(say polynomial)\ndo a multi dimensional search, need more walkers\nand more steps\n\nlook up first two erros - whether they are to do with python version\n\nTry to UNDERSTAND\n\nNotable results:\n best index is = 3010835\n abest is = 3.9419662932\n bbest is = -3.01946040697\n cbest is = 0.990232737609\n dbest is = 15.0034779775\n ebest is = 1.50005168141\n Mean acceptance fraction: 0.50711475\n Number of steps: 100000\n Number of walkers: 200\n Sampler time: 63min 42s\n Total time: 65min 30s\n\"\"\"\nimport corner\nimport emcee\nimport logging\nimport matplotlib.pyplot as pl\nimport numpy as np\nimport scipy.optimize as op\nimport sys\nimport time\n\n\ntry:\n timet0 = time.time() # starting script timer\n \n \n # Input\n # \"True\" parameters.\n a_true = 0.1\n b_true = -3\n c_true = 0.5\n d_true = 0.1\n e_true = 12\n \n N = 20 # number of datapoints\n sigma = 0.75 # standard deviation\n mu = 0 # mean\n \n ndim, nwalkers = 5, 12\n nsteps = 1000\n burnin = 500\n \n \n # Functions\n def lnlike(theta, x, y, sigma):\n a, b, c, d, e = theta\n model = a * x**4 + b * x**2 + c * x + d + e*np.sin(x)\n inv_sigma2 = 1.0/(sigma**2)\n return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2))) \n \n def lnprior(theta):\n a, b, c, d, e = theta\n if (-5.0 < a < 5 and -5.0 < b < 5.0 and 0.0 < c < 1.0 and 0.0 < d < 20 \n and -3.0 < e < 30):\n return 0.0\n return -np.inf\n \n def lnprob(theta, x, y, sigma):\n lp = lnprior(theta)\n if not np.isfinite(lp):\n return -np.inf\n return lp + lnlike(theta, x, y, sigma) \n \n \n # Generating noisy data from the model y.\n x = np.random.rand(N)*4 # picking random points on x-axis\n yerr = np.random.normal(mu,sigma,N) # Gaussian noise\n y = a_true * x**4 + b_true * x**2 + c_true * x + d_true + e_true*np.sin(x) \n y += yerr # data, offset in y with noise\n \n \n # Finding a \"good\" place to start using alternative method to emcee.\n nll = lambda *args: -lnlike(*args)\n result = op.minimize(nll, [a_true, b_true, c_true, d_true, e_true], \n args=(x, y, yerr))\n a_ml, b_ml, c_ml, d_ml, e_ml = result[\"x\"] \n \n \n # Initializing walkers in a Gaussian ball around the max likelihood. \n pos = [result[\"x\"] + 1*np.random.randn(ndim) for i in range(nwalkers)] \n \n \n # Sampler setup\n times0 = time.time() # starting emcee timer\n \n sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y, sigma))\n sampler.run_mcmc(pos, nsteps)\n \n times1=time.time() # stopping emcee timer\n times=times1 - times0 # time to run emcee\n timesmin = round((times / 60),1) # minutes\n timessec = round((times % 60),1) # seconds\n \n \n # Corner plot (walkers' walk + histogram).\n samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))\n fig = corner.corner(samples, labels=[\"$a$\", \"$b$\", \"$c$\", \"$d$\", \"$e$\"], \n truths=[a_true, b_true, c_true, d_true, e_true])\n fig.savefig('nsteps'+str(nsteps)+str(time.strftime(\"%c\"))+\n 'nwalkers'+str(nwalkers)+'.png')\n \n \n # Marginalised distribution (histogram) plot.\n pl.hist(sampler.flatchain[:,0], 100)\n pl.show()\n \n \n # Plotting lines of best fit using a 100-strong sample of parameters.\n xl = np.linspace(0,4,100)\n #for a, b, c, d, e in samples[np.random.randint(len(samples), size=100)]:\n # pl.plot(xl, a * xl**4 + b * xl**2 + c * xl + d +\n # e*np.sin(xl), color=\"k\", alpha=0.1)\n pl.plot(xl, a_true * xl**4 + b_true * xl**2 + c_true * xl + d_true + \n e_true*np.sin(xl),color=\"r\", lw=2, alpha=0.8)\n pl.errorbar(x, y, yerr=yerr, fmt=\".k\")\n pl.show()\n \n \n # Best line of fit found by emcee.\n bi = np.argmax(sampler.lnprobability) # index with highest post prob \n abest = sampler.flatchain[bi,0] # parameters with the highest \n bbest = sampler.flatchain[bi,1] # posterior probability\n cbest = sampler.flatchain[bi,2]\n dbest = sampler.flatchain[bi,3]\n ebest = sampler.flatchain[bi,4]\n \n \n # plot of data with errorbars + model\n pl.errorbar(x, y, yerr=sigma, fmt='o', alpha=0.3)\n xt = np.linspace(0,4,100)\n yt = (a_true * xt**4 + b_true * xt**2 + c_true * xt + d_true \n + e_true * np.sin(xt))\n model, = pl.plot(xt,yt,lw='3', c='g')\n ybest = (abest * xt**4 + bbest * xt**2 + cbest * xt + dbest \n + ebest * np.sin(xt))\n best_fit, = pl.plot(xt,ybest,lw='3', c='r')\n pl.legend([model, best_fit], ['Model', 'Best Fit'])\n pl.show\n \n \n timet1=time.time() # stopping script time\n timet=timet1-timet0 # total time to run script\n timetmin = round((timet / 60),1) # minutes\n timetsec = round((timet % 60),1) # seconds\n \n \n # Results getting printed:\n print('best index is =',str(bi))\n print('abest is =',str(abest))\n print('bbest is =',str(bbest))\n print('cbest is =',str(cbest))\n print('dbest is =',str(dbest))\n print('ebest is =',str(ebest))\n # Mean acceptance fraction. In general, acceptance fraction has an entry \n # for each walker so, in this case, it is a 50-dimensional vector.\n print('Mean acceptance fraction:', np.mean(sampler.acceptance_fraction))\n print('Number of steps:', str(nsteps))\n print('Number of walkers:', str(nwalkers))\n print('Sampler time:',str(int(timesmin))+'min'\n ,str(int(timessec))+'s')\n print('Total time: ',str(int(timetmin))+'min'\n ,str(int(timetsec))+'s')\n \n \nexcept Exception as e:\n logging.error('Caught exception:',str(e))\n print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))\n\n\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.log",
"numpy.linspace",
"numpy.isfinite",
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.random.normal",
"numpy.argmax",
"scipy.optimize.minimize",
"matplotlib.pyplot.errorbar",
"numpy.random.rand",
"numpy.mean",
"numpy.random.randn",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
Pascal-Bliem/tox-block | [
"fed3d54553a0911d190e421feafddb11969878cd"
] | [
"tox_block/model/lstm_multi_label.py"
] | [
"\"\"\"A bidirectional LSTM model with multi labels (6 types of toxicity)\"\"\"\n\n# general data handling and computation\nimport pandas as pd\nimport numpy as np\n# TensorFlow / Keras\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense, Embedding, Input\nfrom tensorflow.keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom tensorflow.keras.wrappers.scikit_learn import KerasClassifier\n# modules for this package\nfrom tox_block.config import config\n\n\ndef get_model(embedding_matrix: np.ndarray = None,\n embedding_size: int = config.EMBEDDING_SIZE,\n max_sequence_length: int = config.MAX_SEQUENCE_LENGTH,\n max_features: int = config.MAX_FEATURES,\n dropout: float = config.DROPOUT,\n num_lstm_units: int = config.NUM_LSTM_UNITS,\n num_dense_units: int = config.NUM_DENSE_UNITS,\n learning_rate: float = config.LEARNING_RATE):\n \"\"\"Returns a bidirectional LSTM model\"\"\"\n \n inp = Input(shape=(max_sequence_length, ))\n if not embedding_matrix is None:\n x = Embedding(max_features, \n embedding_size, \n weights=[embedding_matrix])(inp)\n else:\n x = Embedding(max_features, \n embedding_size)(inp)\n x = Bidirectional(LSTM(num_lstm_units, \n return_sequences=True, \n dropout=dropout, \n recurrent_dropout=dropout))(x)\n x = GlobalMaxPool1D()(x)\n x = Dense(num_dense_units, activation=\"relu\")(x)\n x = Dropout(rate=dropout)(x)\n x = Dense(6, activation=\"sigmoid\")(x)\n model = Model(inputs=inp, outputs=x)\n model.compile(Adam(lr=learning_rate),\n loss=\"binary_crossentropy\",\n metrics=[\"accuracy\"])\n\n return model\n\n# callbacks for training\ncheckpoint = ModelCheckpoint(config.TRAINED_MODEL_DIR + \"/checkpoint.h5\", \n monitor=\"val_loss\", \n verbose=1, \n save_best_only=True, \n mode=\"min\")\n\nearly_stop = EarlyStopping(monitor=\"val_loss\", \n mode=\"min\", \n patience=2,\n restore_best_weights=True)\n\nreduce_lr = ReduceLROnPlateau(monitor=\"val_loss\",\n factor=0.5,\n patience=1,\n verbose=1,\n mode=\"min\",\n min_lr=0.00001)\n\ncallbacks_list = [checkpoint, early_stop, reduce_lr]\n\nif __name__ == '__main__':\n model = get_model()\n model.summary()"
] | [
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.GlobalMaxPool1D",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
frgfm/torch-zoo | [
"c97beacf3d49eaa34398abf47f378ea6b48a70f3",
"c97beacf3d49eaa34398abf47f378ea6b48a70f3",
"c97beacf3d49eaa34398abf47f378ea6b48a70f3"
] | [
"holocron/nn/modules/conv.py",
"holocron/models/classification/darknetv4.py",
"references/clean_checkpoint.py"
] | [
"# Copyright (C) 2019-2022, François-Guillaume Fernandez.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport math\nfrom typing import Any, List, Optional\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.nn.functional import pad\nfrom torch.nn.modules.conv import _ConvNd\nfrom torch.nn.modules.utils import _pair\n\nfrom .. import functional as F\n\n__all__ = [\"NormConv2d\", \"Add2d\", \"SlimConv2d\", \"PyConv2d\", \"Involution2d\"]\n\n\nclass _NormConvNd(_ConvNd):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n stride: int,\n padding: int,\n dilation: int,\n transposed: bool,\n output_padding: int,\n groups: int,\n bias: bool,\n padding_mode: str,\n normalize_slices=False,\n eps=1e-14,\n ) -> None:\n super().__init__(\n in_channels,\n out_channels,\n kernel_size, # type: ignore[arg-type]\n stride, # type: ignore[arg-type]\n padding, # type: ignore[arg-type]\n dilation, # type: ignore[arg-type]\n transposed,\n output_padding, # type: ignore[arg-type]\n groups,\n bias,\n padding_mode,\n )\n self.normalize_slices = normalize_slices\n self.eps = eps\n\n\nclass NormConv2d(_NormConvNd):\n r\"\"\"Implements the normalized convolution module from `\"Normalized Convolutional Neural Network\"\n <https://arxiv.org/pdf/2005.05274v2.pdf>`_.\n\n In the simplest case, the output value of the layer with input size\n :math:`(N, C_{in}, H, W)` and output :math:`(N, C_{out}, H_{out}, W_{out})`\n can be precisely described as:\n\n .. math::\n out(N_i, C_{out_j}) = bias(C_{out_j}) +\n \\sum_{k = 0}^{C_{in} - 1} weight(C_{out_j}, k) \\star\n \\frac{input(N_i, k) - \\mu(N_i, k)}{\\sqrt{\\sigma^2(N_i, k) + \\epsilon}}\n\n where :math:`\\star` is the valid 2D cross-correlation operator,\n :math:`\\mu(N_i, k)` and :math:`\\sigma²(N_i, k)` are the mean and variance of :math:`input(N_i, k)` over all slices,\n :math:`N` is a batch size, :math:`C` denotes a number of channels,\n :math:`H` is a height of input planes in pixels, and :math:`W` is\n width in pixels.\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel\n elements. Default: 1\n groups (int, optional): Number of blocked connections from input\n channels to output channels. Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the\n output. Default: ``True``\n padding_mode (string, optional): ``'zeros'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. Default: ``'zeros'``\n eps (float, optional): a value added to the denominator for numerical stability.\n Default: 1e-14\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n stride: int = 1,\n padding: int = 0,\n dilation: int = 1,\n groups: int = 1,\n bias: bool = True,\n padding_mode: str = \"zeros\",\n eps: float = 1e-14,\n ) -> None:\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super().__init__(\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n dilation,\n False,\n _pair(0),\n groups,\n bias,\n padding_mode,\n False,\n eps,\n )\n\n def forward(self, x: Tensor) -> Tensor:\n if self.padding_mode != \"zeros\":\n return F.norm_conv2d(\n pad(x, self._reversed_padding_repeated_twice, mode=self.padding_mode),\n self.weight,\n self.bias,\n self.stride, # type: ignore[arg-type]\n _pair(0),\n self.dilation, # type: ignore[arg-type]\n self.groups,\n self.eps,\n )\n return F.norm_conv2d(\n x,\n self.weight,\n self.bias,\n self.stride, # type: ignore[arg-type]\n self.padding, # type: ignore[arg-type]\n self.dilation, # type: ignore[arg-type]\n self.groups,\n self.eps,\n )\n\n\nclass Add2d(_NormConvNd):\n r\"\"\"Implements the adder module from `\"AdderNet: Do We Really Need Multiplications in Deep Learning?\"\n <https://arxiv.org/pdf/1912.13200.pdf>`_.\n\n In the simplest case, the output value of the layer at position :math:`(m, n)` in channel :math:`c`\n with filter F of spatial size :math:`(d, d)`, intput size :math:`(C_{in}, H, W)` and output :math:`(C_{out}, H, W)`\n can be precisely described as:\n\n .. math::\n out(m, n, c) = - \\sum\\limits_{i=0}^d \\sum\\limits_{j=0}^d \\sum\\limits_{k=0}^{C_{in}}\n |X(m + i, n + j, k) - F(i, j, k, c)|\n\n where :math:`C` denotes a number of channels,\n :math:`H` is a height of input planes in pixels, and :math:`W` is\n width in pixels.\n\n .. image:: https://github.com/frgfm/Holocron/releases/download/v0.1.3/add2d.png\n :align: center\n :alt: Add2D schema\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel\n elements. Default: 1\n groups (int, optional): Number of blocked connections from input\n channels to output channels. Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the\n output. Default: ``True``\n padding_mode (string, optional): ``'zeros'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. Default: ``'zeros'``\n normalize_slices (bool, optional): whether slices should be normalized before performing cross-correlation.\n Default: False\n eps (float, optional): a value added to the denominator for numerical stability.\n Default: 1e-14\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n stride: int = 1,\n padding: int = 0,\n dilation: int = 1,\n groups: int = 1,\n bias: bool = True,\n padding_mode: str = \"zeros\",\n normalize_slices: bool = False,\n eps: float = 1e-14,\n ) -> None:\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super().__init__(\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n dilation,\n False,\n _pair(0),\n groups,\n bias,\n padding_mode,\n normalize_slices,\n eps,\n )\n\n def forward(self, x: Tensor) -> Tensor:\n if self.padding_mode != \"zeros\":\n return F.add2d(\n pad(x, self._reversed_padding_repeated_twice, mode=self.padding_mode),\n self.weight,\n self.bias,\n self.stride, # type: ignore[arg-type]\n _pair(0),\n self.dilation, # type: ignore[arg-type]\n self.groups,\n self.normalize_slices,\n self.eps,\n )\n return F.add2d(\n x,\n self.weight,\n self.bias,\n self.stride, # type: ignore[arg-type]\n self.padding, # type: ignore[arg-type]\n self.dilation, # type: ignore[arg-type]\n self.groups,\n self.normalize_slices,\n self.eps,\n )\n\n\nclass SlimConv2d(nn.Module):\n r\"\"\"Implements the convolution module from `\"SlimConv: Reducing Channel Redundancy in Convolutional Neural Networks\n by Weights Flipping\" <https://arxiv.org/pdf/2003.07469.pdf>`_.\n\n First, we compute channel-wise weights as follows:\n\n .. math::\n z(c) = \\frac{1}{H \\cdot W} \\sum\\limits_{i=1}^H \\sum\\limits_{j=1}^W X_{c,i,j}\n\n where :math:`X \\in \\mathbb{R}^{C \\times H \\times W}` is the input tensor,\n :math:`H` is height in pixels, and :math:`W` is\n width in pixels.\n\n .. math::\n w = \\sigma(F_{fc2}(\\delta(F_{fc1}(z))))\n\n where :math:`z \\in \\mathbb{R}^{C}` contains channel-wise statistics,\n :math:`\\sigma` refers to the sigmoid function,\n :math:`\\delta` refers to the ReLU function,\n :math:`F_{fc1}` is a convolution operation with kernel of size :math:`(1, 1)`\n with :math:`max(C/r, L)` output channels followed by batch normalization,\n and :math:`F_{fc2}` is a plain convolution operation with kernel of size :math:`(1, 1)`\n with :math:`C` output channels.\n\n We then proceed with reconstructing and transforming both pathways:\n\n .. math::\n X_{top} = X \\odot w\n\n .. math::\n X_{bot} = X \\odot \\check{w}\n\n where :math:`\\odot` refers to the element-wise multiplication and :math:`\\check{w}` is\n the channel-wise reverse-flip of :math:`w`.\n\n .. math::\n T_{top} = F_{top}(X_{top}^{(1)} + X_{top}^{(2)})\n\n .. math::\n T_{bot} = F_{bot}(X_{bot}^{(1)} + X_{bot}^{(2)})\n\n where :math:`X^{(1)}` and :math:`X^{(2)}` are the channel-wise first and second halves of :math:`X`,\n :math:`F_{top}` is a convolution of kernel size :math:`(3, 3)`,\n and :math:`F_{bot}` is a convolution of kernel size :math:`(1, 1)` reducing channels by half,\n followed by a convolution of kernel size :math:`(3, 3)`.\n\n Finally we fuse both pathways to yield the output:\n\n .. math::\n Y = T_{top} \\oplus T_{bot}\n\n where :math:`\\oplus` is the channel-wise concatenation.\n\n .. image:: https://github.com/frgfm/Holocron/releases/download/v0.1.3/slimconv2d.png\n :align: center\n :alt: SlimConv2D schema\n\n\n Args:\n in_channels (int): Number of channels in the input image\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel\n elements. Default: 1\n groups (int, optional): Number of blocked connections from input\n channels to output channels. Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the\n output. Default: ``True``\n padding_mode (string, optional): ``'zeros'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. Default: ``'zeros'``\n r (int, optional): squeezing divider. Default: 32\n L (int, optional): minimum squeezed channels. Default: 8\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n kernel_size: int,\n stride: int = 1,\n padding: int = 0,\n dilation: int = 1,\n groups: int = 1,\n bias: bool = True,\n padding_mode: str = \"zeros\",\n r: int = 32,\n L: int = 2,\n ) -> None:\n super().__init__()\n self.fc1 = nn.Conv2d(in_channels, max(in_channels // r, L), 1)\n self.bn = nn.BatchNorm2d(max(in_channels // r, L))\n self.fc2 = nn.Conv2d(max(in_channels // r, L), in_channels, 1)\n self.conv_top = nn.Conv2d(\n in_channels // 2, in_channels // 2, kernel_size, stride, padding, dilation, groups, bias, padding_mode\n )\n self.conv_bot1 = nn.Conv2d(in_channels // 2, in_channels // 4, 1)\n self.conv_bot2 = nn.Conv2d(\n in_channels // 4, in_channels // 4, kernel_size, stride, padding, dilation, groups, bias, padding_mode\n )\n\n def forward(self, x: Tensor) -> Tensor:\n # Channel-wise weights\n z = x.mean(dim=(2, 3), keepdim=True)\n z = self.bn(self.fc1(z))\n z = self.fc2(torch.relu(z))\n w = torch.sigmoid(z)\n\n # Compression\n X_w = x * w\n X_top = X_w[:, : x.shape[1] // 2] + X_w[:, x.shape[1] // 2 :]\n X_w = x * w.flip(dims=(1,))\n X_bot = X_w[:, : x.shape[1] // 2] + X_w[:, x.shape[1] // 2 :]\n\n # Transform\n X_top = self.conv_top(X_top)\n X_bot = self.conv_bot2(self.conv_bot1(X_bot))\n\n # Fuse\n return torch.cat((X_top, X_bot), dim=1)\n\n\nclass PyConv2d(nn.ModuleList):\n \"\"\"Implements the convolution module from `\"Pyramidal Convolution: Rethinking Convolutional Neural Networks for\n Visual Recognition\" <https://arxiv.org/pdf/2006.11538.pdf>`_.\n\n .. image:: https://github.com/frgfm/Holocron/releases/download/v0.1.3/pyconv2d.png\n :align: center\n :alt: PyConv2D schema\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int): Size of the convolving kernel\n num_levels (int, optional): number of stacks in the pyramid\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n groups (list(int), optional): Number of blocked connections from input\n channels to output channels. Default: 1\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n num_levels: int = 2,\n padding: int = 0,\n groups: Optional[List[int]] = None,\n **kwargs: Any,\n ) -> None:\n\n if num_levels == 1:\n super().__init__(\n [\n nn.Conv2d(\n in_channels,\n out_channels,\n kernel_size,\n padding=padding,\n groups=groups[0] if isinstance(groups, list) else 1,\n **kwargs,\n )\n ]\n )\n else:\n exp2 = int(math.log2(num_levels))\n reminder = num_levels - 2**exp2\n out_chans = [out_channels // 2 ** (exp2 + 1)] * (2 * reminder) + [out_channels // 2**exp2] * (\n num_levels - 2 * reminder\n )\n\n k_sizes = [kernel_size + 2 * idx for idx in range(num_levels)]\n if groups is None:\n groups = [1] + [\n min(2 ** (2 + idx), out_chan) for idx, out_chan in zip(range(num_levels - 1), out_chans[1:])\n ]\n elif not isinstance(groups, list) or len(groups) != num_levels:\n raise ValueError(\"The argument `group` is expected to be a list of integer of size `num_levels`.\")\n paddings = [padding + idx for idx in range(num_levels)]\n\n super().__init__(\n [\n nn.Conv2d(in_channels, out_chan, k_size, padding=padding, groups=group, **kwargs)\n for out_chan, k_size, padding, group in zip(out_chans, k_sizes, paddings, groups)\n ]\n )\n self.num_levels = num_levels\n\n def forward(self, x):\n\n if self.num_levels == 1:\n return self[0].forward(x)\n return torch.cat([conv(x) for conv in self], dim=1)\n\n\nclass Involution2d(nn.Module):\n \"\"\"Implements the convolution module from `\"Involution: Inverting the Inherence of Convolution for Visual\n Recognition\" <https://arxiv.org/pdf/2103.06255.pdf>`_, adapted from the proposed PyTorch implementation in\n the paper.\n\n .. image:: https://github.com/frgfm/Holocron/releases/download/v0.1.3/involutions.png\n :align: center\n :alt: Involution2d schema\n\n Args:\n in_channels (int): Number of channels in the input image\n kernel_size (int): Size of the convolving kernel\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n stride: Stride of the convolution. Default: 1\n groups: Number of blocked connections from input channels to output channels. Default: 1\n dilation: Spacing between kernel elements. Default: 1\n reduction_ratio: reduction ratio of the channels to generate the kernel\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n kernel_size: int,\n padding: int = 0,\n stride: int = 1,\n groups: int = 1,\n dilation: int = 1,\n reduction_ratio: float = 1,\n ) -> None:\n\n super().__init__()\n\n self.groups = groups\n self.k_size = kernel_size\n\n self.pool = nn.AvgPool2d(stride, stride) if stride > 1 else None\n self.reduce = nn.Conv2d(in_channels, int(in_channels // reduction_ratio), 1)\n self.span = nn.Conv2d(int(in_channels // reduction_ratio), kernel_size**2 * groups, 1)\n self.unfold = nn.Unfold(kernel_size, dilation, padding, stride)\n\n def forward(self, x):\n\n # Kernel generation\n # (N, C, H, W) --> (N, C, H // s, W // s)\n kernel = self.pool(x) if isinstance(self.pool, nn.Module) else x\n # --> (N, C // r, H // s, W // s)\n kernel = self.reduce(kernel)\n # --> (N, K * K * G, H // s, W // s)\n kernel = self.span(kernel)\n # --> (N, G, 1, K ** 2, H // s, W // s)\n kernel = kernel.view(x.shape[0], self.groups, 1, self.k_size**2, *kernel.shape[-2:])\n\n # --> (N, C * K ** 2, H * W // s ** 2)\n x_unfolded = self.unfold(x)\n # --> (N, G, C // G, K ** 2, H // s, W // s)\n x_unfolded = x_unfolded.reshape(x.shape[0], self.groups, x.shape[1] // self.groups, -1, *kernel.shape[-2:])\n\n # Multiply-Add operation\n # --> (N, C, H // s, W // s)\n out = (kernel * x_unfolded).sum(dim=3).view(*x.shape[:2], *kernel.shape[-2:])\n\n return out\n",
"# Copyright (C) 2020-2022, François-Guillaume Fernandez.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nfrom collections import OrderedDict\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\n\nfrom holocron.nn import DropBlock2d, GlobalAvgPool2d\nfrom holocron.nn.init import init_module\n\nfrom ..presets import IMAGENETTE\nfrom ..utils import conv_sequence, load_pretrained_params\nfrom .darknetv3 import ResBlock\n\n__all__ = [\"DarknetV4\", \"cspdarknet53\", \"cspdarknet53_mish\"]\n\n\ndefault_cfgs: Dict[str, Dict[str, Any]] = {\n \"cspdarknet53\": {\n **IMAGENETTE,\n \"input_shape\": (3, 224, 224),\n \"url\": \"https://github.com/frgfm/Holocron/releases/download/v0.1.3/cspdarknet53_224-d2a17b18.pt\",\n },\n \"cspdarknet53_mish\": {\n **IMAGENETTE,\n \"input_shape\": (3, 224, 224),\n \"url\": \"https://github.com/frgfm/Holocron/releases/download/v0.1.3/cspdarknet53_mish_256-32d8ec68.pt\",\n },\n}\n\n\nclass CSPStage(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n num_blocks: int = 1,\n act_layer: Optional[nn.Module] = None,\n norm_layer: Optional[Callable[[int], nn.Module]] = None,\n drop_layer: Optional[Callable[..., nn.Module]] = None,\n conv_layer: Optional[Callable[..., nn.Module]] = None,\n ) -> None:\n super().__init__()\n compression = 2 if num_blocks > 1 else 1\n self.base_layer = nn.Sequential(\n *conv_sequence(\n in_channels,\n out_channels,\n act_layer,\n norm_layer,\n drop_layer,\n conv_layer,\n kernel_size=3,\n padding=1,\n stride=2,\n bias=(norm_layer is None),\n ),\n # Share the conv\n *conv_sequence(\n out_channels,\n 2 * out_channels // compression,\n act_layer,\n norm_layer,\n drop_layer,\n conv_layer,\n kernel_size=1,\n bias=(norm_layer is None),\n ),\n )\n self.main = nn.Sequential(\n *[\n ResBlock(\n out_channels // compression,\n out_channels // compression if num_blocks > 1 else in_channels,\n act_layer,\n norm_layer,\n drop_layer,\n conv_layer,\n )\n for _ in range(num_blocks)\n ],\n *conv_sequence(\n out_channels // compression,\n out_channels // compression,\n act_layer,\n norm_layer,\n drop_layer,\n conv_layer,\n kernel_size=1,\n bias=(norm_layer is None),\n ),\n )\n self.transition = nn.Sequential(\n *conv_sequence(\n 2 * out_channels // compression,\n out_channels,\n act_layer,\n norm_layer,\n drop_layer,\n conv_layer,\n kernel_size=1,\n bias=(norm_layer is None),\n )\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.base_layer(x)\n x1, x2 = x.chunk(2, dim=1)\n return self.transition(torch.cat([x1, self.main(x2)], dim=1))\n\n\nclass DarknetBodyV4(nn.Sequential):\n def __init__(\n self,\n layout: List[Tuple[int, int]],\n in_channels: int = 3,\n stem_channels: int = 32,\n num_features: int = 1,\n act_layer: Optional[nn.Module] = None,\n norm_layer: Optional[Callable[[int], nn.Module]] = None,\n drop_layer: Optional[Callable[..., nn.Module]] = None,\n conv_layer: Optional[Callable[..., nn.Module]] = None,\n ) -> None:\n\n super().__init__()\n\n if act_layer is None:\n act_layer = nn.LeakyReLU(inplace=True)\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n\n in_chans = [stem_channels] + [_layout[0] for _layout in layout[:-1]]\n\n super().__init__(\n OrderedDict(\n [\n (\n \"stem\",\n nn.Sequential(\n *conv_sequence(\n in_channels,\n stem_channels,\n act_layer,\n norm_layer,\n drop_layer,\n conv_layer,\n kernel_size=3,\n padding=1,\n bias=(norm_layer is None),\n )\n ),\n ),\n (\n \"stages\",\n nn.Sequential(\n *[\n CSPStage(\n _in_chans, out_chans, num_blocks, act_layer, norm_layer, drop_layer, conv_layer\n )\n for _in_chans, (out_chans, num_blocks) in zip(in_chans, layout)\n ]\n ),\n ),\n ]\n )\n )\n\n self.num_features = num_features\n\n def forward(self, x: torch.Tensor) -> Union[torch.Tensor, List[torch.Tensor]]:\n\n if self.num_features == 1:\n return super().forward(x)\n\n self.stem: nn.Sequential\n self.stages: nn.Sequential\n x = self.stem(x)\n features = []\n for idx, stage in enumerate(self.stages):\n x = stage(x)\n if idx >= (len(self.stages) - self.num_features):\n features.append(x)\n\n return features\n\n\nclass DarknetV4(nn.Sequential):\n def __init__(\n self,\n layout: List[Tuple[int, int]],\n num_classes: int = 10,\n in_channels: int = 3,\n stem_channels: int = 32,\n num_features: int = 1,\n act_layer: Optional[nn.Module] = None,\n norm_layer: Optional[Callable[[int], nn.Module]] = None,\n drop_layer: Optional[Callable[..., nn.Module]] = None,\n conv_layer: Optional[Callable[..., nn.Module]] = None,\n ) -> None:\n\n super().__init__(\n OrderedDict(\n [\n (\n \"features\",\n DarknetBodyV4(\n layout,\n in_channels,\n stem_channels,\n num_features,\n act_layer,\n norm_layer,\n drop_layer,\n conv_layer,\n ),\n ),\n (\"pool\", GlobalAvgPool2d(flatten=True)),\n (\"classifier\", nn.Linear(layout[-1][0], num_classes)),\n ]\n )\n )\n\n init_module(self, \"leaky_relu\")\n\n\ndef _darknet(arch: str, pretrained: bool, progress: bool, layout: List[Tuple[int, int]], **kwargs: Any) -> DarknetV4:\n # Build the model\n model = DarknetV4(layout, **kwargs)\n model.default_cfg = default_cfgs[arch] # type: ignore[assignment]\n # Load pretrained parameters\n if pretrained:\n load_pretrained_params(model, default_cfgs[arch][\"url\"], progress)\n\n return model\n\n\ndef cspdarknet53(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DarknetV4:\n \"\"\"CSP-Darknet-53 from\n `\"CSPNet: A New Backbone that can Enhance Learning Capability of CNN\" <https://arxiv.org/pdf/1911.11929.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n\n Returns:\n torch.nn.Module: classification model\n \"\"\"\n\n return _darknet(\"cspdarknet53\", pretrained, progress, [(64, 1), (128, 2), (256, 8), (512, 8), (1024, 4)], **kwargs)\n\n\ndef cspdarknet53_mish(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DarknetV4:\n \"\"\"Modified version of CSP-Darknet-53 from\n `\"CSPNet: A New Backbone that can Enhance Learning Capability of CNN\" <https://arxiv.org/pdf/1911.11929.pdf>`_\n with Mish as activation layer and DropBlock as regularization layer.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n\n Returns:\n torch.nn.Module: classification model\n \"\"\"\n\n kwargs[\"act_layer\"] = nn.Mish(inplace=True)\n kwargs[\"drop_layer\"] = DropBlock2d\n\n return _darknet(\n \"cspdarknet53_mish\", pretrained, progress, [(64, 1), (128, 2), (256, 8), (512, 8), (1024, 4)], **kwargs\n )\n",
"# Copyright (C) 2019-2022, François-Guillaume Fernandez.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport hashlib\n\nimport torch\n\n\ndef main(args):\n\n checkpoint = torch.load(args.checkpoint, map_location=\"cpu\")[\"model\"]\n torch.save(checkpoint, args.outfile, _use_new_zipfile_serialization=False)\n\n with open(args.outfile, \"rb\") as f:\n sha_hash = hashlib.sha256(f.read()).hexdigest()\n print(f\"Checkpoint saved to {args.outfile} with hash: {sha_hash[:8]}\")\n\n\ndef parse_args():\n import argparse\n\n parser = argparse.ArgumentParser(\n description=\"Training checkpoint cleanup\", formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n parser.add_argument(\"checkpoint\", type=str, help=\"path to the training checkpoint\")\n parser.add_argument(\"outfile\", type=str, help=\"model\")\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n"
] | [
[
"torch.sigmoid",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.Unfold",
"torch.relu",
"torch.nn.AvgPool2d",
"torch.nn.modules.utils._pair",
"torch.nn.functional.pad"
],
[
"torch.nn.Linear",
"torch.nn.Mish",
"torch.nn.LeakyReLU"
],
[
"torch.load",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chnsh/deep-semantic-code-search | [
"57cf12b90b5ec3a49bd6c04cf2b68888162558b3",
"57cf12b90b5ec3a49bd6c04cf2b68888162558b3"
] | [
"code_summarization_transfer_learning/fastai/courses/dl1/fastai/imports.py",
"code_summarization_transfer_learning/fastai/courses/dl1/fastai/dataloader.py"
] | [
"from IPython.lib.deepreload import reload as dreload\nimport PIL, os, numpy as np, threading, json, bcolz, scipy\nimport pandas as pd, pickle, string, sys, re, time, shutil, copy\nimport seaborn as sns, matplotlib\nfrom abc import abstractmethod\nfrom functools import partial\nfrom pandas_summary import DataFrameSummary\nfrom IPython.lib.display import FileLink\nfrom sklearn import metrics, ensemble, preprocessing\nfrom operator import itemgetter, attrgetter\n\nfrom matplotlib import pyplot as plt, rcParams, animation\n\nmatplotlib.rc('animation', html='html5')\nnp.set_printoptions(precision=5, linewidth=110, suppress=True)\n\nfrom ipykernel.kernelapp import IPKernelApp\n\n\ndef in_notebook(): return IPKernelApp.initialized()\n\n\ndef in_ipynb():\n try:\n cls = get_ipython().__class__.__name__\n return cls == 'ZMQInteractiveShell'\n except NameError:\n return False\n\n\nimport tqdm as tq\n\n\ndef clear_tqdm():\n inst = getattr(tq.tqdm, '_instances', None)\n if not inst: return\n try:\n for i in range(len(inst)): inst.pop().close()\n except Exception:\n pass\n\n\nif in_notebook():\n def tqdm(*args, **kwargs):\n clear_tqdm()\n return tq.tqdm(*args, file=sys.stdout, **kwargs)\n\n\n def trange(*args, **kwargs):\n clear_tqdm()\n return tq.trange(*args, file=sys.stdout, **kwargs)\nelse:\n from tqdm import tqdm, trange\n\n tnrange = trange\n tqdm_notebook = tqdm\n",
"import collections\n\nfrom torch.utils.data.sampler import SequentialSampler, RandomSampler, BatchSampler\n\nfrom .core import *\n\nstring_classes = (str, bytes)\n\n\ndef get_tensor(batch, pin, half=False):\n if isinstance(batch, (np.ndarray, np.generic)):\n batch = T(batch, half=half, cuda=False).contiguous()\n if pin: batch = batch.pin_memory()\n return to_gpu(batch)\n elif isinstance(batch, string_classes):\n return batch\n elif isinstance(batch, collections.Mapping):\n return {k: get_tensor(sample, pin, half) for k, sample in batch.items()}\n elif isinstance(batch, collections.Sequence):\n return [get_tensor(sample, pin, half) for sample in batch]\n raise TypeError(f\"batch must contain numbers, dicts or lists; found {type(batch)}\")\n\n\nclass DataLoader(object):\n def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n pad_idx=0,\n num_workers=None, pin_memory=False, drop_last=False, pre_pad=True, half=False,\n transpose=False, transpose_y=False):\n self.dataset, self.batch_size, self.num_workers = dataset, batch_size, num_workers\n self.pin_memory, self.drop_last, self.pre_pad = pin_memory, drop_last, pre_pad\n self.transpose, self.transpose_y, self.pad_idx, self.half = transpose, transpose_y, pad_idx, half\n\n if batch_sampler is not None:\n if batch_size > 1 or shuffle or sampler is not None or drop_last:\n raise ValueError('batch_sampler is mutually exclusive with '\n 'batch_size, shuffle, sampler, and drop_last')\n\n if sampler is not None and shuffle:\n raise ValueError('sampler is mutually exclusive with shuffle')\n\n if batch_sampler is None:\n if sampler is None:\n sampler = RandomSampler(dataset) if shuffle else SequentialSampler(dataset)\n batch_sampler = BatchSampler(sampler, batch_size, drop_last)\n\n if num_workers is None:\n self.num_workers = num_cpus()\n\n self.sampler = sampler\n self.batch_sampler = batch_sampler\n\n def __len__(self):\n return len(self.batch_sampler)\n\n def jag_stack(self, b):\n if len(b[0].shape) not in (1, 2): return np.stack(b)\n ml = max(len(o) for o in b)\n if min(len(o) for o in b) == ml: return np.stack(b)\n res = np.zeros((len(b), ml), dtype=b[0].dtype) + self.pad_idx\n for i, o in enumerate(b):\n if self.pre_pad:\n res[i, -len(o):] = o\n else:\n res[i, :len(o)] = o\n return res\n\n def np_collate(self, batch):\n b = batch[0]\n if isinstance(b, (np.ndarray, np.generic)):\n return self.jag_stack(batch)\n elif isinstance(b, (int, float)):\n return np.array(batch)\n elif isinstance(b, string_classes):\n return batch\n elif isinstance(b, collections.Mapping):\n return {key: self.np_collate([d[key] for d in batch]) for key in b}\n elif isinstance(b, collections.Sequence):\n return [self.np_collate(samples) for samples in zip(*batch)]\n raise TypeError((\"batch must contain numbers, dicts or lists; found {}\".format(type(b))))\n\n def get_batch(self, indices):\n res = self.np_collate([self.dataset[i] for i in indices])\n if self.transpose: res[0] = res[0].T\n if self.transpose_y: res[1] = res[1].T\n return res\n\n def __iter__(self):\n if self.num_workers == 0:\n for batch in map(self.get_batch, iter(self.batch_sampler)):\n yield get_tensor(batch, self.pin_memory, self.half)\n else:\n with ThreadPoolExecutor(max_workers=self.num_workers) as e:\n # avoid py3.6 issue where queue is infinite and can result in memory exhaustion\n for c in chunk_iter(iter(self.batch_sampler), self.num_workers * 10):\n for batch in e.map(self.get_batch, c):\n yield get_tensor(batch, self.pin_memory, self.half)\n"
] | [
[
"numpy.set_printoptions",
"matplotlib.rc"
],
[
"torch.utils.data.sampler.SequentialSampler",
"torch.utils.data.sampler.RandomSampler",
"torch.utils.data.sampler.BatchSampler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SCUT-AILab/BPAI-Net | [
"d71c92366222c9e226e15f8263fc2d72361735c3"
] | [
"ops/models.py"
] | [
"# Code for \"TSM: Temporal Shift Module for Efficient Video Understanding\"\n# arXiv:1811.08383\n# Ji Lin*, Chuang Gan, Song Han\n# {jilin, songhan}@mit.edu, [email protected]\n\nfrom torch import nn\nfrom ops.basic_ops import ConsensusModule\nfrom ops.transforms import *\nfrom torch.nn.init import normal_, constant_\nfrom archs.fusion_model import fusion\nclass TSN(nn.Module):\n def __init__(self, num_class, num_segments, modality,patch_size,\n base_model='resnet101', new_length=None,\n consensus_type='avg', before_softmax=True,\n dropout=0.8, img_feature_dim=256,\n crop_num=1, partial_bn=True, print_spec=True, pretrain='imagenet',\n is_shift=False, shift_div=8, shift_place='blockres', fc_lr5=False,\n temporal_pool=False, non_local=False,first=None,second=None,gcn_stride=1,base_lr=0.001,concat_layer=5,\n xyc=False,bn=False,arch_cnn='mobilenetv2',gcn_dropout=0.5):\n super(TSN, self).__init__()\n self.num_class=num_class\n self.modality = modality\n self.num_segments = num_segments\n self.reshape = True\n self.print_spec = print_spec\n self.before_softmax = before_softmax\n self.dropout = dropout\n self.crop_num = crop_num\n self.consensus_type = consensus_type\n self.img_feature_dim = img_feature_dim # the dimension of the CNN feature to represent each frame\n self.pretrain = pretrain\n self.first = first\n self.second = second\n self.bn = bn\n self.gcn_stride = gcn_stride\n self.concat_layer=concat_layer\n self.xyc = xyc\n self.arch_cnn = arch_cnn\n self.patch_size=patch_size\n self.gcn_dropout = gcn_dropout\n\n self.is_shift = is_shift\n self.shift_div = shift_div\n self.shift_place = shift_place\n self.base_model_name = base_model\n self.fc_lr5 = fc_lr5\n self.temporal_pool = temporal_pool\n self.non_local = non_local\n self.base_lr = base_lr\n if not before_softmax and consensus_type != 'avg':\n raise ValueError(\"Only avg consensus can be used after Softmax\")\n\n if new_length is None:\n self.new_length = 1 if modality == \"RGB\" else 5\n else:\n self.new_length = new_length\n if print_spec:\n print((\"\"\"\n Initializing TSN with base model: {}.\n TSN Configurations:\n input_modality: {}\n num_segments: {}\n new_length: {}\n consensus_module: {}\n dropout_ratio: {}\n img_feature_dim: {}\n \"\"\".format(base_model, self.modality, self.num_segments, self.new_length, consensus_type, self.dropout, self.img_feature_dim)))\n\n self._prepare_base_model(base_model)\n\n feature_dim = self._prepare_tsn(num_class)\n\n if self.modality == 'Flow':\n print(\"Converting the ImageNet model to a flow init model\")\n self.base_model = self._construct_flow_model(self.base_model)\n print(\"Done. Flow model ready...\")\n elif self.modality == 'RGBDiff':\n print(\"Converting the ImageNet model to RGB+Diff init model\")\n self.base_model = self._construct_diff_model(self.base_model)\n print(\"Done. RGBDiff model ready.\")\n\n self.consensus = ConsensusModule(consensus_type)\n\n if not self.before_softmax:\n self.softmax = nn.Softmax()\n\n self._enable_pbn = partial_bn\n if partial_bn:\n self.partialBN(True)\n\n def _prepare_tsn(self, num_class):\n feature_dim = getattr(self.base_model, self.base_model.last_layer_name).in_features\n if self.dropout == 0:\n setattr(self.base_model, self.base_model.last_layer_name, nn.Linear(feature_dim, num_class))\n self.new_fc = None\n else:\n setattr(self.base_model, self.base_model.last_layer_name, nn.Dropout(p=self.dropout))\n self.new_fc = nn.Linear(feature_dim, num_class)\n\n std = 0.001\n if self.new_fc is None:\n normal_(getattr(self.base_model, self.base_model.last_layer_name).weight, 0, std)\n constant_(getattr(self.base_model, self.base_model.last_layer_name).bias, 0)\n else:\n if hasattr(self.new_fc, 'weight'):\n normal_(self.new_fc.weight, 0, std)\n constant_(self.new_fc.bias, 0)\n return feature_dim\n\n def _prepare_base_model(self, base_model):\n print('=> base model: {}'.format(base_model))\n if base_model == 'fusion':\n self.base_model = fusion(self.num_class, True if self.pretrain == 'imagenet' else False, self.first,\n self.second,stride=self.gcn_stride, patch_size=self.patch_size,\n concat_layer=self.concat_layer, xyc=self.xyc, bn=self.bn,\n arch_cnn=self.arch_cnn,dropout=self.gcn_dropout)\n\n if 'resnet' in base_model or 'resnet' in self.arch_cnn:\n\n if 'resnet' in base_model:\n self.base_model = getattr(torchvision.models, base_model)(True if self.pretrain == 'imagenet' else False)\n if self.is_shift:\n print('Adding temporal shift...')\n from ops.temporal_shift import make_temporal_shift\n make_temporal_shift(self.base_model, self.num_segments,\n n_div=self.shift_div, place=self.shift_place, temporal_pool=self.temporal_pool)\n\n if self.non_local:\n print('Adding non-local module...')\n from ops.non_local import make_non_local\n make_non_local(self.base_model, self.num_segments)\n\n self.base_model.last_layer_name = 'fc'\n self.input_size = 224\n self.input_mean = [0.485, 0.456, 0.406]\n self.input_std = [0.229, 0.224, 0.225]\n\n self.base_model.avgpool = nn.AdaptiveAvgPool2d(1)\n\n if self.modality == 'Flow':\n self.input_mean = [0.5]\n self.input_std = [np.mean(self.input_std)]\n elif self.modality == 'RGBDiff':\n self.input_mean = [0.485, 0.456, 0.406] + [0] * 3 * self.new_length\n self.input_std = self.input_std + [np.mean(self.input_std) * 2] * 3 * self.new_length\n\n elif base_model == 'mobilenetv2' or self.arch_cnn == 'mobilenetv2':\n from archs.mobilenet_v2 import mobilenet_v2, InvertedResidual\n #from archs.online.mobilenet_v2_online import mobilenet_v2\n fc_name = 'fc'\n if base_model == 'mobilenetv2':\n self.base_model = mobilenet_v2(True if self.pretrain == 'imagenet' else False)\n fc_name = 'classifier'\n self.base_model.last_layer_name = fc_name\n self.input_size = 224\n self.input_mean = [0.485, 0.456, 0.406]\n self.input_std = [0.229, 0.224, 0.225]\n\n self.base_model.avgpool = nn.AdaptiveAvgPool2d(1)\n if self.is_shift:\n from ops.temporal_shift import TemporalShift\n if self.arch_cnn == 'mobilenetv2':\n modules = self.base_model.cnn.modules()\n else:\n modules = self.base_model.modules()\n for m in modules:\n if isinstance(m, InvertedResidual) and len(m.conv) == 8 and m.use_res_connect:\n if self.print_spec:\n print('Adding temporal shift... {}'.format(m.use_res_connect))\n m.conv[0] = TemporalShift(m.conv[0], n_segment=self.num_segments, n_div=self.shift_div)\n if self.modality == 'Flow':\n self.input_mean = [0.5]\n self.input_std = [np.mean(self.input_std)]\n elif self.modality == 'RGBDiff':\n self.input_mean = [0.485, 0.456, 0.406] + [0] * 3 * self.new_length\n self.input_std = self.input_std + [np.mean(self.input_std) * 2] * 3 * self.new_length\n else:\n raise ValueError('Unknown base model: {}'.format(base_model))\n\n def train(self, mode=True):\n \"\"\"\n Override the default train() to freeze the BN parameters\n :return:\n \"\"\"\n super(TSN, self).train(mode)\n count = 0\n if self._enable_pbn and mode:\n print(\"Freezing BatchNorm2D except the first one.\")\n for m in self.base_model.modules():\n if isinstance(m, nn.BatchNorm2d):\n count += 1\n if count >= (2 if self._enable_pbn else 1):\n m.eval()\n # shutdown update in frozen mode\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n\n def partialBN(self, enable):\n self._enable_pbn = enable\n\n def get_optim_policies(self):\n first_conv_weight = []\n first_conv_bias = []\n normal_weight = []\n normal_bias = []\n lr5_weight = []\n lr10_bias = []\n bn = []\n custom_ops = []\n gcn = []\n conv_cnt = 0\n bn_cnt = 0\n for m in self.modules():\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv3d):\n ps = list(m.parameters())\n conv_cnt += 1\n if conv_cnt == 1:\n first_conv_weight.append(ps[0])\n if len(ps) == 2:\n first_conv_bias.append(ps[1])\n else:\n normal_weight.append(ps[0])\n if len(ps) == 2:\n normal_bias.append(ps[1])\n elif isinstance(m, torch.nn.Linear):\n ps = list(m.parameters())\n if self.fc_lr5:\n lr5_weight.append(ps[0])\n else:\n normal_weight.append(ps[0])\n if len(ps) == 2:\n if self.fc_lr5:\n lr10_bias.append(ps[1])\n else:\n normal_bias.append(ps[1])\n\n elif isinstance(m, torch.nn.BatchNorm2d):\n bn_cnt += 1\n # later BN's are frozen\n if not self._enable_pbn or bn_cnt == 1:\n bn.extend(list(m.parameters()))\n elif isinstance(m, torch.nn.BatchNorm3d):\n bn_cnt += 1\n # later BN's are frozen\n if not self._enable_pbn or bn_cnt == 1:\n bn.extend(list(m.parameters()))\n elif isinstance(m, torch.nn.BatchNorm1d):\n bn_cnt += 1\n # later BN's are frozen\n if not self._enable_pbn or bn_cnt == 1:\n bn.extend(list(m.parameters()))\n elif len(m._modules) == 0:\n if len(list(m.parameters())) > 0:\n raise ValueError(\"New atomic module type: {}. Need to give it a learning policy\".format(type(m)))\n\n return [\n {'params': first_conv_weight, 'lr_mult': 5 if self.modality == 'Flow' else 1, 'decay_mult': 1,\n 'name': \"first_conv_weight\"},\n {'params': first_conv_bias, 'lr_mult': 10 if self.modality == 'Flow' else 2, 'decay_mult': 0,\n 'name': \"first_conv_bias\"},\n {'params': normal_weight, 'lr_mult': 1, 'decay_mult': 1,\n 'name': \"normal_weight\"},\n {'params': normal_bias, 'lr_mult': 2, 'decay_mult': 0,\n 'name': \"normal_bias\"},\n {'params': bn, 'lr_mult': 1, 'decay_mult': 0,\n 'name': \"BN scale/shift\"},\n {'params': custom_ops, 'lr_mult': 1, 'decay_mult': 1,\n 'name': \"custom_ops\"},\n # for fc\n {'params': lr5_weight, 'lr_mult': 5, 'decay_mult': 1,\n 'name': \"lr5_weight\"},\n {'params': lr10_bias, 'lr_mult': 10, 'decay_mult': 0,\n 'name': \"lr10_bias\"},\n ]\n\n def forward(self,input,ske=None,boxes=None,no_reshape=False):# input torch.Size([8, 24, 224, 224])\n if not no_reshape:\n sample_len = (3 if self.modality == \"RGB\" else 2) * self.new_length\n\n if self.modality == 'RGBDiff':\n sample_len = 3 * self.new_length\n input = self._get_diff(input)\n # reshape size(BT,C,H,W)\n #todo\n if self.base_model_name == 'fusion':\n ske_result,base_out= self.base_model(input.view((-1, sample_len) + input.size()[-2:]),ske,boxes) #torch.Size([16, 2048])\n else:\n base_out = self.base_model(input.view((-1, sample_len) + input.size()[-2:]))\n else:\n if 'resnet' in self.base_model_name:\n base_out = self.base_model(input)\n elif self.base_model_name == 'fusion':\n ske_result,base_out= self.base_model(input,ske,boxes) #torch.Size([16, 2048])\n else:\n raise NotImplementedError('only support resnet and fusion model')\n\n\n if self.dropout > 0:\n base_out = self.new_fc(base_out) #torch.Size([64, 34])(BT,num_classes)\n\n if not self.before_softmax:\n base_out = self.softmax(base_out)\n\n\n if self.is_shift and self.temporal_pool:\n base_out = base_out.view((-1, self.num_segments // 2) + base_out.size()[1:])\n else:\n base_out = base_out.view((-1, self.num_segments) + base_out.size()[1:]) # (8,8,34)(B,T,num_classes)\n output = self.consensus(base_out).squeeze(1) # (8,1,34)\n\n if self.base_model_name == 'fusion':\n return (output,ske_result)\n else:\n return output\n\n\n def _get_diff(self, input, keep_rgb=False):\n input_c = 3 if self.modality in [\"RGB\", \"RGBDiff\"] else 2\n input_view = input.view((-1, self.num_segments, self.new_length + 1, input_c,) + input.size()[2:])\n if keep_rgb:\n new_data = input_view.clone()\n else:\n new_data = input_view[:, :, 1:, :, :, :].clone()\n\n for x in reversed(list(range(1, self.new_length + 1))):\n if keep_rgb:\n new_data[:, :, x, :, :, :] = input_view[:, :, x, :, :, :] - input_view[:, :, x - 1, :, :, :]\n else:\n new_data[:, :, x - 1, :, :, :] = input_view[:, :, x, :, :, :] - input_view[:, :, x - 1, :, :, :]\n\n return new_data\n\n def _construct_flow_model(self, base_model):\n # modify the convolution layers\n # Torch models are usually defined in a hierarchical way.\n # nn.modules.children() return all sub modules in a DFS manner\n modules = list(self.base_model.modules())\n first_conv_idx = list(filter(lambda x: isinstance(modules[x], nn.Conv2d), list(range(len(modules)))))[0]\n conv_layer = modules[first_conv_idx]\n container = modules[first_conv_idx - 1]\n\n # modify parameters, assume the first blob contains the convolution kernels\n params = [x.clone() for x in conv_layer.parameters()]\n kernel_size = params[0].size()\n new_kernel_size = kernel_size[:1] + (2 * self.new_length, ) + kernel_size[2:]\n new_kernels = params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()\n\n new_conv = nn.Conv2d(2 * self.new_length, conv_layer.out_channels,\n conv_layer.kernel_size, conv_layer.stride, conv_layer.padding,\n bias=True if len(params) == 2 else False)\n new_conv.weight.data = new_kernels\n if len(params) == 2:\n new_conv.bias.data = params[1].data # add bias if neccessary\n layer_name = list(container.state_dict().keys())[0][:-7] # remove .weight suffix to get the layer name\n\n # replace the first convlution layer\n setattr(container, layer_name, new_conv)\n\n if self.base_model_name == 'BNInception':\n import torch.utils.model_zoo as model_zoo\n sd = model_zoo.load_url('https://www.dropbox.com/s/35ftw2t4mxxgjae/BNInceptionFlow-ef652051.pth.tar?dl=1')\n base_model.load_state_dict(sd)\n print('=> Loading pretrained Flow weight done...')\n else:\n print('#' * 30, 'Warning! No Flow pretrained model is found')\n return base_model\n\n def _construct_diff_model(self, base_model, keep_rgb=False):\n # modify the convolution layers\n # Torch models are usually defined in a hierarchical way.\n # nn.modules.children() return all sub modules in a DFS manner\n modules = list(self.base_model.modules())\n first_conv_idx = filter(lambda x: isinstance(modules[x], nn.Conv2d), list(range(len(modules))))[0]\n conv_layer = modules[first_conv_idx]\n container = modules[first_conv_idx - 1]\n\n # modify parameters, assume the first blob contains the convolution kernels\n params = [x.clone() for x in conv_layer.parameters()]\n kernel_size = params[0].size()\n if not keep_rgb:\n new_kernel_size = kernel_size[:1] + (3 * self.new_length,) + kernel_size[2:]\n new_kernels = params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()\n else:\n new_kernel_size = kernel_size[:1] + (3 * self.new_length,) + kernel_size[2:]\n new_kernels = torch.cat((params[0].data, params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()),\n 1)\n new_kernel_size = kernel_size[:1] + (3 + 3 * self.new_length,) + kernel_size[2:]\n\n new_conv = nn.Conv2d(new_kernel_size[1], conv_layer.out_channels,\n conv_layer.kernel_size, conv_layer.stride, conv_layer.padding,\n bias=True if len(params) == 2 else False)\n new_conv.weight.data = new_kernels\n if len(params) == 2:\n new_conv.bias.data = params[1].data # add bias if neccessary\n layer_name = list(container.state_dict().keys())[0][:-7] # remove .weight suffix to get the layer name\n\n # replace the first convolution layer\n setattr(container, layer_name, new_conv)\n return base_model\n\n @property\n def crop_size(self):\n return self.input_size\n\n @property\n def scale_size(self):\n return self.input_size * 256 // 224\n\n def get_augmentation(self, flip=True):\n if self.modality == 'RGB':\n if flip:\n #return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75, .66])])\n return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75, .66]),\n GroupRandomHorizontalFlip(is_flow=False)])\n else:\n print('#' * 20, 'NO FLIP!!!')\n return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75, .66])])\n elif self.modality == 'Flow':\n return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75]),\n GroupRandomHorizontalFlip(is_flow=True)])\n elif self.modality == 'RGBDiff':\n return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75]),\n GroupRandomHorizontalFlip(is_flow=False)])\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.init.normal_",
"torch.utils.model_zoo.load_url"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
csingh27sewts/rlpyt | [
"4252eb63515c9e68c0674fb010d2c6dbfdac9122"
] | [
"rlpyt/envs/dm_control_env.py"
] | [
"from dm_control import suite\nfrom dm_control.suite.wrappers import pixels\nfrom dm_env.specs import Array, BoundedArray\n\nimport numpy as np\nimport os\nimport atari_py\nimport cv2\nimport copy\nfrom collections import namedtuple, OrderedDict\nfrom rlpyt.utils.collections import namedarraytuple\n\nfrom rlpyt.envs.base import Env, EnvStep, EnvSpaces\nfrom rlpyt.spaces.box import Box\nfrom rlpyt.spaces.composite import Composite\nfrom rlpyt.utils.quick_args import save__init__args\nfrom rlpyt.samplers.collections import TrajInfo\n\nState = None\n\ndef convert_dm_control_to_rlpyt_space(dm_control_space):\n \"\"\"Recursively convert dm_control_space into gym space.\n\n Note: Need to check the following cases of the input type, in the following\n order:\n (1) BoundedArray\n (2) Array\n (3) OrderedDict.\n\n - Generally, dm_control observation_specs are OrderedDict with other spaces\n (e.g. Array) nested in it.\n - Generally, dm_control action_specs are of type `BoundedArray`.\n\n To handle dm_control observation_specs as inputs, we check the following\n input types in order to enable recursive calling on each nested item.\n \"\"\"\n if isinstance(dm_control_space, BoundedArray):\n rlpyt_box = Box(\n low=dm_control_space.minimum,\n high=dm_control_space.maximum,\n shape=None,\n dtype=dm_control_space.dtype)\n assert rlpyt_box.shape == dm_control_space.shape, (\n (rlpyt_box.shape, dm_control_space.shape))\n return rlpyt_box\n elif isinstance(dm_control_space, Array):\n if isinstance(dm_control_space, BoundedArray):\n raise ValueError(\"The order of the if-statements matters.\")\n return Box(\n low=-float(\"inf\"),\n high=float(\"inf\"),\n shape=dm_control_space.shape,\n dtype=dm_control_space.dtype)\n elif isinstance(dm_control_space, OrderedDict):\n global State\n if State is None:\n State = namedtuple('State', list(dm_control_space.keys()))\n return Composite([convert_dm_control_to_rlpyt_space(value)\n for value in dm_control_space.values()], State)\n else:\n raise ValueError(dm_control_space)\n\nEnvInfo = None\nObservation = None\n\ndef init_namedtuples(info_keys=None, state_keys=None):\n global EnvInfo, Observation, State\n\n if info_keys is None:\n info_keys = ['traj_done']\n\n if state_keys is None:\n state_keys = ['pixels']\n\n EnvInfo = namedtuple('EnvInfo', info_keys)\n Observation = namedarraytuple('Observation', state_keys)\n State = namedtuple('State', state_keys)\n\nclass DMControlEnv(Env):\n\n def __init__(self,\n domain,\n task,\n frame_skip=1,\n normalize=False,\n pixel_wrapper_kwargs=None,\n task_kwargs={},\n environment_kwargs={},\n max_path_length=1200,\n ):\n save__init__args(locals(), underscore=True)\n\n env = suite.load(domain_name=domain,\n task_name=task,\n task_kwargs=task_kwargs,\n environment_kwargs=environment_kwargs)\n if normalize:\n np.testing.assert_equal(env.action_spec().minimum, -1)\n np.testing.assert_equal(env.action_spec().maximum, 1)\n if pixel_wrapper_kwargs is not None:\n env = pixels.Wrapper(env, **pixel_wrapper_kwargs)\n self._env = env\n\n self._observation_keys = tuple(env.observation_spec().keys())\n observation_space = convert_dm_control_to_rlpyt_space(\n env.observation_spec())\n self._observation_space = observation_space\n\n action_space = convert_dm_control_to_rlpyt_space(env.action_spec())\n if len(action_space.shape) > 1:\n raise NotImplementedError(\n \"Shape of the action space ({}) is not flat, make sure to\"\n \" check the implemenation.\".format(action_space))\n self._action_space = action_space\n\n self._step_count = 0\n\n def reset(self):\n self._step_count = 0\n time_step = self._env.reset()\n observation = self._filter_observation(time_step.observation)\n\n global Observation\n if Observation is None:\n Observation = namedarraytuple(\"Observation\", list(observation.keys()))\n observation = Observation(**{k: v for k, v in observation.items()\n if k in self._observation_keys})\n return observation\n\n def step(self, action):\n time_step = self._env.step(action)\n reward = time_step.reward\n terminal = time_step.last()\n info = time_step.info\n info.update({\n key: value\n for key, value in time_step.observation.items()\n if key not in self._observation_keys\n })\n observation = self._filter_observation(time_step.observation)\n\n self._step_count += 1\n info['traj_done'] = self._step_count >= self._max_path_length\n\n global EnvInfo\n if EnvInfo is None:\n EnvInfo = namedtuple(\"EnvInfo\", list(info.keys()))\n info = EnvInfo(**{k: v for k, v in info.items() if k in EnvInfo._fields})\n\n global Observation\n if Observation is None:\n Observation = namedarraytuple(\"Observation\", list(observation.keys()))\n observation = Observation(**{k: v.copy() for k, v in observation.items()\n if k in self._observation_keys})\n\n return EnvStep(observation, reward, terminal, info)\n\n def render(self, *args, mode='rgb_array', width=256, height=256,\n cameria_id=0, **kwargs):\n if mode == 'human':\n raise NotImplementedError(\n \"TODO(Alacarter): Figure out how to not continuously launch\"\n \" viewers if one is already open.\"\n \" See: https://github.com/deepmind/dm_control/issues/39.\")\n elif mode == 'rgb_array':\n return self._env.physics.render(width=width, height=height,\n camera_id=cameria_id, **kwargs)\n raise NotImplementedError(mode)\n\n def get_obs(self):\n obs = self._env.task.get_observation(self._env.physics)\n obs['pixels'] = self._env.physics.render(**self._env._render_kwargs)\n obs = self._filter_observation(obs)\n obs = Observation(**{k: v for k, v in obs.items()\n if k in self._observation_keys})\n return obs\n\n def get_state(self, ignore_step=True):\n if ignore_step:\n return self._env.physics.get_state()\n return self._env.physics.get_state(), self._step_count\n\n def set_state(self, state, ignore_step=True):\n if ignore_step:\n self._env.physics.set_state(state)\n self._env.step(np.zeros(self.action_space.shape))\n else:\n self._env.physics.set_state(state[0])\n self._env.step(np.zeros(self.action_space.shape))\n self._step_count = state[1]\n\n def get_geoms(self):\n return self._env.task.get_geoms(self._env.physics)\n\n @property\n def spaces(self):\n return EnvSpaces(\n observation=self._observation_space,\n action=self._action_space,\n )\n\n ###########################################################################\n # Helpers\n\n def _filter_observation(self, observation):\n observation = type(observation)([\n (name, value)\n for name, value in observation.items()\n if name in self._observation_keys\n ])\n return observation\n\n ###########################################################################\n # Properties\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
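Two details in convert_dm_control_to_rlpyt_space above are worth calling out: BoundedArray subclasses Array, so the bounded check must run first or every action spec would be converted to an unbounded box, and dm_control observation specs arrive as OrderedDicts that need a recursive walk. Below is a minimal sketch of the same recursion written against plain gym spaces rather than rlpyt's Box/Composite; the helper name dm_spec_to_gym_space is ours, and it assumes gym and dm_env are installed.

import numpy as np
from collections import OrderedDict

from dm_env.specs import Array, BoundedArray
from gym import spaces


def dm_spec_to_gym_space(spec):
    # Must test BoundedArray first: it is a subclass of Array.
    if isinstance(spec, BoundedArray):
        low = np.broadcast_to(spec.minimum, spec.shape)
        high = np.broadcast_to(spec.maximum, spec.shape)
        return spaces.Box(low=low, high=high, dtype=spec.dtype)
    if isinstance(spec, Array):
        return spaces.Box(low=-np.inf, high=np.inf, shape=spec.shape, dtype=spec.dtype)
    if isinstance(spec, OrderedDict):
        # dm_control observation_specs nest other specs inside an OrderedDict.
        return spaces.Dict({key: dm_spec_to_gym_space(value)
                            for key, value in spec.items()})
    raise ValueError('unsupported spec: {:}'.format(spec))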
MUST-AI-Lab/NAS-Projects | [
"1ce3249a5a58af3506b8c9af977008ddf8198445",
"1ce3249a5a58af3506b8c9af977008ddf8198445",
"fcb2aae34a2b3c02877fbdb41cda45e1e73327a6",
"fcb2aae34a2b3c02877fbdb41cda45e1e73327a6"
] | [
"exps/NAS-Bench-201/statistics.py",
"lib/models/cell_infers/tiny_network.py",
"exps/algos/DARTS-V2.py",
"lib/nas_201_api/api.py"
] | [
"##################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #\n##################################################\nimport os, sys, time, argparse, collections\nfrom copy import deepcopy\nimport torch\nimport torch.nn as nn\nfrom pathlib import Path\nfrom collections import defaultdict\nlib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()\nif str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))\nfrom log_utils import AverageMeter, time_string, convert_secs2time\nfrom config_utils import load_config, dict2config\nfrom datasets import get_datasets\n# NAS-Bench-201 related module or function\nfrom models import CellStructure, get_cell_based_tiny_net\nfrom nas_201_api import ArchResults, ResultsCount\nfrom functions import pure_evaluate\n\n\n\ndef create_result_count(used_seed, dataset, arch_config, results, dataloader_dict):\n xresult = ResultsCount(dataset, results['net_state_dict'], results['train_acc1es'], results['train_losses'], \\\n results['param'], results['flop'], arch_config, used_seed, results['total_epoch'], None)\n\n net_config = dict2config({'name': 'infer.tiny', 'C': arch_config['channel'], 'N': arch_config['num_cells'], 'genotype': CellStructure.str2structure(arch_config['arch_str']), 'num_classes':arch_config['class_num']}, None)\n network = get_cell_based_tiny_net(net_config)\n network.load_state_dict(xresult.get_net_param())\n if 'train_times' in results: # new version\n xresult.update_train_info(results['train_acc1es'], results['train_acc5es'], results['train_losses'], results['train_times'])\n xresult.update_eval(results['valid_acc1es'], results['valid_losses'], results['valid_times'])\n else:\n if dataset == 'cifar10-valid':\n xresult.update_OLD_eval('x-valid' , results['valid_acc1es'], results['valid_losses'])\n loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format('cifar10', 'test')], network.cuda())\n xresult.update_OLD_eval('ori-test', {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})\n xresult.update_latency(latencies)\n elif dataset == 'cifar10':\n xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])\n loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda())\n xresult.update_latency(latencies)\n elif dataset == 'cifar100' or dataset == 'ImageNet16-120':\n xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])\n loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'valid')], network.cuda())\n xresult.update_OLD_eval('x-valid', {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})\n loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda())\n xresult.update_OLD_eval('x-test' , {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})\n xresult.update_latency(latencies)\n else:\n raise ValueError('invalid dataset name : {:}'.format(dataset))\n return xresult\n \n\n\ndef account_one_arch(arch_index, arch_str, checkpoints, datasets, dataloader_dict):\n information = ArchResults(arch_index, arch_str)\n\n for checkpoint_path in checkpoints:\n checkpoint = torch.load(checkpoint_path, map_location='cpu')\n used_seed = checkpoint_path.name.split('-')[-1].split('.')[0]\n for dataset in datasets:\n assert dataset in checkpoint, 'Can not find {:} in arch-{:} from {:}'.format(dataset, arch_index, checkpoint_path)\n results = checkpoint[dataset]\n 
assert results['finish-train'], 'This {:} arch seed={:} does not finish train on {:} ::: {:}'.format(arch_index, used_seed, dataset, checkpoint_path)\n arch_config = {'channel': results['channel'], 'num_cells': results['num_cells'], 'arch_str': arch_str, 'class_num': results['config']['class_num']}\n \n xresult = create_result_count(used_seed, dataset, arch_config, results, dataloader_dict)\n information.update(dataset, int(used_seed), xresult)\n return information\n\n\n\ndef GET_DataLoaders(workers):\n\n torch.set_num_threads(workers)\n\n root_dir = (Path(__file__).parent / '..' / '..').resolve()\n torch_dir = Path(os.environ['TORCH_HOME'])\n # cifar\n cifar_config_path = root_dir / 'configs' / 'nas-benchmark' / 'CIFAR.config'\n cifar_config = load_config(cifar_config_path, None, None)\n print ('{:} Create data-loader for all datasets'.format(time_string()))\n print ('-'*200)\n TRAIN_CIFAR10, VALID_CIFAR10, xshape, class_num = get_datasets('cifar10', str(torch_dir/'cifar.python'), -1)\n print ('original CIFAR-10 : {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_CIFAR10), len(VALID_CIFAR10), xshape, class_num))\n cifar10_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'cifar-split.txt', None, None)\n assert cifar10_splits.train[:10] == [0, 5, 7, 11, 13, 15, 16, 17, 20, 24] and cifar10_splits.valid[:10] == [1, 2, 3, 4, 6, 8, 9, 10, 12, 14]\n temp_dataset = deepcopy(TRAIN_CIFAR10)\n temp_dataset.transform = VALID_CIFAR10.transform\n # data loader\n trainval_cifar10_loader = torch.utils.data.DataLoader(TRAIN_CIFAR10, batch_size=cifar_config.batch_size, shuffle=True , num_workers=workers, pin_memory=True)\n train_cifar10_loader = torch.utils.data.DataLoader(TRAIN_CIFAR10, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar10_splits.train), num_workers=workers, pin_memory=True)\n valid_cifar10_loader = torch.utils.data.DataLoader(temp_dataset , batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar10_splits.valid), num_workers=workers, pin_memory=True)\n test__cifar10_loader = torch.utils.data.DataLoader(VALID_CIFAR10, batch_size=cifar_config.batch_size, shuffle=False, num_workers=workers, pin_memory=True)\n print ('CIFAR-10 : trval-loader has {:3d} batch with {:} per batch'.format(len(trainval_cifar10_loader), cifar_config.batch_size))\n print ('CIFAR-10 : train-loader has {:3d} batch with {:} per batch'.format(len(train_cifar10_loader), cifar_config.batch_size))\n print ('CIFAR-10 : valid-loader has {:3d} batch with {:} per batch'.format(len(valid_cifar10_loader), cifar_config.batch_size))\n print ('CIFAR-10 : test--loader has {:3d} batch with {:} per batch'.format(len(test__cifar10_loader), cifar_config.batch_size))\n print ('-'*200)\n # CIFAR-100\n TRAIN_CIFAR100, VALID_CIFAR100, xshape, class_num = get_datasets('cifar100', str(torch_dir/'cifar.python'), -1)\n print ('original CIFAR-100: {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_CIFAR100), len(VALID_CIFAR100), xshape, class_num))\n cifar100_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'cifar100-test-split.txt', None, None)\n assert cifar100_splits.xvalid[:10] == [1, 3, 4, 5, 8, 10, 13, 14, 15, 16] and cifar100_splits.xtest[:10] == [0, 2, 6, 7, 9, 11, 12, 17, 20, 24]\n train_cifar100_loader = torch.utils.data.DataLoader(TRAIN_CIFAR100, batch_size=cifar_config.batch_size, shuffle=True, num_workers=workers, 
pin_memory=True)\n valid_cifar100_loader = torch.utils.data.DataLoader(VALID_CIFAR100, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xvalid), num_workers=workers, pin_memory=True)\n test__cifar100_loader = torch.utils.data.DataLoader(VALID_CIFAR100, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xtest) , num_workers=workers, pin_memory=True)\n print ('CIFAR-100 : train-loader has {:3d} batch'.format(len(train_cifar100_loader)))\n print ('CIFAR-100 : valid-loader has {:3d} batch'.format(len(valid_cifar100_loader)))\n print ('CIFAR-100 : test--loader has {:3d} batch'.format(len(test__cifar100_loader)))\n print ('-'*200)\n\n imagenet16_config_path = 'configs/nas-benchmark/ImageNet-16.config'\n imagenet16_config = load_config(imagenet16_config_path, None, None)\n TRAIN_ImageNet16_120, VALID_ImageNet16_120, xshape, class_num = get_datasets('ImageNet16-120', str(torch_dir/'cifar.python'/'ImageNet16'), -1)\n print ('original TRAIN_ImageNet16_120: {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_ImageNet16_120), len(VALID_ImageNet16_120), xshape, class_num))\n imagenet_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'imagenet-16-120-test-split.txt', None, None)\n assert imagenet_splits.xvalid[:10] == [1, 2, 3, 6, 7, 8, 9, 12, 16, 18] and imagenet_splits.xtest[:10] == [0, 4, 5, 10, 11, 13, 14, 15, 17, 20]\n train_imagenet_loader = torch.utils.data.DataLoader(TRAIN_ImageNet16_120, batch_size=imagenet16_config.batch_size, shuffle=True, num_workers=workers, pin_memory=True)\n valid_imagenet_loader = torch.utils.data.DataLoader(VALID_ImageNet16_120, batch_size=imagenet16_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet_splits.xvalid), num_workers=workers, pin_memory=True)\n test__imagenet_loader = torch.utils.data.DataLoader(VALID_ImageNet16_120, batch_size=imagenet16_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet_splits.xtest) , num_workers=workers, pin_memory=True)\n print ('ImageNet-16-120 : train-loader has {:3d} batch with {:} per batch'.format(len(train_imagenet_loader), imagenet16_config.batch_size))\n print ('ImageNet-16-120 : valid-loader has {:3d} batch with {:} per batch'.format(len(valid_imagenet_loader), imagenet16_config.batch_size))\n print ('ImageNet-16-120 : test--loader has {:3d} batch with {:} per batch'.format(len(test__imagenet_loader), imagenet16_config.batch_size))\n\n # 'cifar10', 'cifar100', 'ImageNet16-120'\n loaders = {'cifar10@trainval': trainval_cifar10_loader,\n 'cifar10@train' : train_cifar10_loader,\n 'cifar10@valid' : valid_cifar10_loader,\n 'cifar10@test' : test__cifar10_loader,\n 'cifar100@train' : train_cifar100_loader,\n 'cifar100@valid' : valid_cifar100_loader,\n 'cifar100@test' : test__cifar100_loader,\n 'ImageNet16-120@train': train_imagenet_loader,\n 'ImageNet16-120@valid': valid_imagenet_loader,\n 'ImageNet16-120@test' : test__imagenet_loader}\n return loaders\n\n\n\ndef simplify(save_dir, meta_file, basestr, target_dir):\n meta_infos = torch.load(meta_file, map_location='cpu')\n meta_archs = meta_infos['archs'] # a list of architecture strings\n meta_num_archs = meta_infos['total']\n meta_max_node = meta_infos['max_node']\n assert meta_num_archs == len(meta_archs), 'invalid number of archs : {:} vs {:}'.format(meta_num_archs, len(meta_archs))\n\n sub_model_dirs = 
sorted(list(save_dir.glob('*-*-{:}'.format(basestr))))\n print ('{:} find {:} directories used to save checkpoints'.format(time_string(), len(sub_model_dirs)))\n \n subdir2archs, num_evaluated_arch = collections.OrderedDict(), 0\n num_seeds = defaultdict(lambda: 0)\n for index, sub_dir in enumerate(sub_model_dirs):\n xcheckpoints = list(sub_dir.glob('arch-*-seed-*.pth'))\n arch_indexes = set()\n for checkpoint in xcheckpoints:\n temp_names = checkpoint.name.split('-')\n assert len(temp_names) == 4 and temp_names[0] == 'arch' and temp_names[2] == 'seed', 'invalid checkpoint name : {:}'.format(checkpoint.name)\n arch_indexes.add( temp_names[1] )\n subdir2archs[sub_dir] = sorted(list(arch_indexes))\n num_evaluated_arch += len(arch_indexes)\n # count number of seeds for each architecture\n for arch_index in arch_indexes:\n num_seeds[ len(list(sub_dir.glob('arch-{:}-seed-*.pth'.format(arch_index)))) ] += 1\n print('{:} There are {:5d} architectures that have been evaluated ({:} in total).'.format(time_string(), num_evaluated_arch, meta_num_archs))\n for key in sorted( list( num_seeds.keys() ) ): print ('{:} There are {:5d} architectures that are evaluated {:} times.'.format(time_string(), num_seeds[key], key))\n\n dataloader_dict = GET_DataLoaders( 6 )\n\n to_save_simply = save_dir / 'simplifies'\n to_save_allarc = save_dir / 'simplifies' / 'architectures'\n if not to_save_simply.exists(): to_save_simply.mkdir(parents=True, exist_ok=True)\n if not to_save_allarc.exists(): to_save_allarc.mkdir(parents=True, exist_ok=True)\n\n assert (save_dir / target_dir) in subdir2archs, 'can not find {:}'.format(target_dir)\n arch2infos, datasets = {}, ('cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120')\n evaluated_indexes = set()\n target_directory = save_dir / target_dir\n target_less_dir = save_dir / '{:}-LESS'.format(target_dir)\n arch_indexes = subdir2archs[ target_directory ]\n num_seeds = defaultdict(lambda: 0)\n end_time = time.time()\n arch_time = AverageMeter()\n for idx, arch_index in enumerate(arch_indexes):\n checkpoints = list(target_directory.glob('arch-{:}-seed-*.pth'.format(arch_index)))\n ckps_less = list(target_less_dir.glob('arch-{:}-seed-*.pth'.format(arch_index)))\n # create the arch info for each architecture\n try:\n arch_info_full = account_one_arch(arch_index, meta_archs[int(arch_index)], checkpoints, datasets, dataloader_dict)\n arch_info_less = account_one_arch(arch_index, meta_archs[int(arch_index)], ckps_less, ['cifar10-valid'], dataloader_dict)\n num_seeds[ len(checkpoints) ] += 1\n except:\n print('Loading {:} failed, : {:}'.format(arch_index, checkpoints))\n continue\n assert int(arch_index) not in evaluated_indexes, 'conflict arch-index : {:}'.format(arch_index)\n assert 0 <= int(arch_index) < len(meta_archs), 'invalid arch-index {:} (not found in meta_archs)'.format(arch_index)\n arch_info = {'full': arch_info_full, 'less': arch_info_less}\n evaluated_indexes.add( int(arch_index) )\n arch2infos[int(arch_index)] = arch_info\n torch.save({'full': arch_info_full.state_dict(),\n 'less': arch_info_less.state_dict()}, to_save_allarc / '{:}-FULL.pth'.format(arch_index))\n arch_info['full'].clear_params()\n arch_info['less'].clear_params()\n torch.save({'full': arch_info_full.state_dict(),\n 'less': arch_info_less.state_dict()}, to_save_allarc / '{:}-SIMPLE.pth'.format(arch_index))\n # measure elapsed time\n arch_time.update(time.time() - end_time)\n end_time = time.time()\n need_time = '{:}'.format( convert_secs2time(arch_time.avg * (len(arch_indexes)-idx-1), True) )\n 
print('{:} {:} [{:03d}/{:03d}] : {:} still need {:}'.format(time_string(), target_dir, idx, len(arch_indexes), arch_index, need_time))\n # measure time\n xstrs = ['{:}:{:03d}'.format(key, num_seeds[key]) for key in sorted( list( num_seeds.keys() ) ) ]\n print('{:} {:} done : {:}'.format(time_string(), target_dir, xstrs))\n final_infos = {'meta_archs' : meta_archs,\n 'total_archs': meta_num_archs,\n 'basestr' : basestr,\n 'arch2infos' : arch2infos,\n 'evaluated_indexes': evaluated_indexes}\n save_file_name = to_save_simply / '{:}.pth'.format(target_dir)\n torch.save(final_infos, save_file_name)\n print ('Save {:} / {:} architecture results into {:}.'.format(len(evaluated_indexes), meta_num_archs, save_file_name))\n\n\n\ndef merge_all(save_dir, meta_file, basestr):\n meta_infos = torch.load(meta_file, map_location='cpu')\n meta_archs = meta_infos['archs']\n meta_num_archs = meta_infos['total']\n meta_max_node = meta_infos['max_node']\n assert meta_num_archs == len(meta_archs), 'invalid number of archs : {:} vs {:}'.format(meta_num_archs, len(meta_archs))\n\n sub_model_dirs = sorted(list(save_dir.glob('*-*-{:}'.format(basestr))))\n print ('{:} find {:} directories used to save checkpoints'.format(time_string(), len(sub_model_dirs)))\n for index, sub_dir in enumerate(sub_model_dirs):\n arch_info_files = sorted( list(sub_dir.glob('arch-*-seed-*.pth') ) )\n print ('The {:02d}/{:02d}-th directory : {:} : {:} runs.'.format(index, len(sub_model_dirs), sub_dir, len(arch_info_files)))\n \n arch2infos, evaluated_indexes = dict(), set()\n for IDX, sub_dir in enumerate(sub_model_dirs):\n ckp_path = sub_dir.parent / 'simplifies' / '{:}.pth'.format(sub_dir.name)\n if ckp_path.exists():\n sub_ckps = torch.load(ckp_path, map_location='cpu')\n assert sub_ckps['total_archs'] == meta_num_archs and sub_ckps['basestr'] == basestr\n xarch2infos = sub_ckps['arch2infos']\n xevalindexs = sub_ckps['evaluated_indexes']\n for eval_index in xevalindexs:\n assert eval_index not in evaluated_indexes and eval_index not in arch2infos\n #arch2infos[eval_index] = xarch2infos[eval_index].state_dict()\n arch2infos[eval_index] = {'full': xarch2infos[eval_index]['full'].state_dict(),\n 'less': xarch2infos[eval_index]['less'].state_dict()}\n evaluated_indexes.add( eval_index )\n print ('{:} [{:03d}/{:03d}] merge data from {:} with {:} models.'.format(time_string(), IDX, len(sub_model_dirs), ckp_path, len(xevalindexs)))\n else:\n raise ValueError('Can not find {:}'.format(ckp_path))\n #print ('{:} [{:03d}/{:03d}] can not find {:}, skip.'.format(time_string(), IDX, len(subdir2archs), ckp_path))\n\n evaluated_indexes = sorted( list( evaluated_indexes ) )\n print ('Finally, there are {:} architectures that have been trained and evaluated.'.format(len(evaluated_indexes)))\n\n to_save_simply = save_dir / 'simplifies'\n if not to_save_simply.exists(): to_save_simply.mkdir(parents=True, exist_ok=True)\n final_infos = {'meta_archs' : meta_archs,\n 'total_archs': meta_num_archs,\n 'arch2infos' : arch2infos,\n 'evaluated_indexes': evaluated_indexes}\n save_file_name = to_save_simply / '{:}-final-infos.pth'.format(basestr)\n torch.save(final_infos, save_file_name)\n print ('Save {:} / {:} architecture results into {:}.'.format(len(evaluated_indexes), meta_num_archs, save_file_name))\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='NAS-BENCH-201', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--mode' , type=str, choices=['cal', 'merge'], help='The running mode for this 
script.')\n parser.add_argument('--base_save_dir', type=str, default='./output/NAS-BENCH-201-4', help='The base-name of folder to save checkpoints and log.')\n parser.add_argument('--target_dir' , type=str, help='The target directory.')\n parser.add_argument('--max_node' , type=int, default=4, help='The maximum node in a cell.')\n parser.add_argument('--channel' , type=int, default=16, help='The number of channels.')\n parser.add_argument('--num_cells' , type=int, default=5, help='The number of cells in one stage.')\n args = parser.parse_args()\n \n save_dir = Path( args.base_save_dir )\n meta_path = save_dir / 'meta-node-{:}.pth'.format(args.max_node)\n assert save_dir.exists(), 'invalid save dir path : {:}'.format(save_dir)\n assert meta_path.exists(), 'invalid saved meta path : {:}'.format(meta_path)\n print ('start the statistics of our nas-benchmark from {:} using {:}.'.format(save_dir, args.target_dir))\n basestr = 'C{:}-N{:}'.format(args.channel, args.num_cells)\n \n if args.mode == 'cal':\n simplify(save_dir, meta_path, basestr, args.target_dir)\n elif args.mode == 'merge':\n merge_all(save_dir, meta_path, basestr)\n else:\n raise ValueError('invalid mode : {:}'.format(args.mode))\n",
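The directory scan in simplify above depends entirely on one checkpoint naming convention, arch-<index>-seed-<seed>.pth; both fields are recovered by splitting the filename on '-'. A standalone sketch of that parsing (the helper name parse_checkpoint_name is hypothetical):

from pathlib import Path


def parse_checkpoint_name(path):
    # e.g. 'arch-000123-seed-777.pth' -> ('000123', '777')
    parts = path.name.split('-')
    assert len(parts) == 4 and parts[0] == 'arch' and parts[2] == 'seed', \
        'invalid checkpoint name : {:}'.format(path.name)
    arch_index = parts[1]            # kept as a zero-padded string, as above
    seed = parts[3].split('.')[0]    # strip the '.pth' suffix
    return arch_index, seed


print(parse_checkpoint_name(Path('arch-000123-seed-777.pth')))  # ('000123', '777')

With this convention, counting how many seeds an architecture was trained with reduces to globbing arch-{index}-seed-*.pth, which is exactly what the num_seeds bookkeeping in simplify does.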
"##################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020 #\n##################################################\nimport torch\nimport torch.nn as nn\nfrom ..cell_operations import ResNetBasicblock\nfrom .cells import InferCell\n\n\n# The macro structure for architectures in NAS-Bench-201\nclass TinyNetwork(nn.Module):\n\n def __init__(self, C, N, genotype, num_classes):\n super(TinyNetwork, self).__init__()\n self._C = C\n self._layerN = N\n\n self.stem = nn.Sequential(\n nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(C))\n \n layer_channels = [C ] * N + [C*2 ] + [C*2 ] * N + [C*4 ] + [C*4 ] * N \n layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N\n\n C_prev = C\n self.cells = nn.ModuleList()\n for index, (C_curr, reduction) in enumerate(zip(layer_channels, layer_reductions)):\n if reduction:\n cell = ResNetBasicblock(C_prev, C_curr, 2, True)\n else:\n cell = InferCell(genotype, C_prev, C_curr, 1)\n self.cells.append( cell )\n C_prev = cell.out_dim\n self._Layer= len(self.cells)\n\n self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))\n self.global_pooling = nn.AdaptiveAvgPool2d(1)\n self.classifier = nn.Linear(C_prev, num_classes)\n\n def get_message(self):\n string = self.extra_repr()\n for i, cell in enumerate(self.cells):\n string += '\\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())\n return string\n\n def extra_repr(self):\n return ('{name}(C={_C}, N={_layerN}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__))\n\n def forward(self, inputs):\n feature = self.stem(inputs)\n for i, cell in enumerate(self.cells):\n feature = cell(feature)\n\n out = self.lastact(feature)\n out = self.global_pooling( out )\n out = out.view(out.size(0), -1)\n logits = self.classifier(out)\n\n return out, logits\n",
"##################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020 #\n########################################################\n# DARTS: Differentiable Architecture Search, ICLR 2019 #\n########################################################\nimport os, sys, time, glob, random, argparse\nimport numpy as np\nfrom copy import deepcopy\nimport torch\nimport torch.nn as nn\nfrom pathlib import Path\nlib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()\nif str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))\nfrom config_utils import load_config, dict2config, configure2str\nfrom datasets import get_datasets, get_nas_search_loaders\nfrom procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler\nfrom utils import get_model_infos, obtain_accuracy\nfrom log_utils import AverageMeter, time_string, convert_secs2time\nfrom models import get_cell_based_tiny_net, get_search_spaces\nfrom nas_201_api import NASBench201API as API\nfrom torch.autograd import Variable\n\n\ndef _concat(xs):\n return torch.cat([x.view(-1) for x in xs])\n\n\ndef _hessian_vector_product(vector, network, criterion, base_inputs, base_targets, r=1e-2):\n R = r / _concat(vector).norm()\n for p, v in zip(network.module.get_weights(), vector):\n p.data.add_(R, v)\n _, logits = network(base_inputs)\n loss = criterion(logits, base_targets)\n grads_p = torch.autograd.grad(loss, network.module.get_alphas())\n\n for p, v in zip(network.module.get_weights(), vector):\n p.data.sub_(2*R, v)\n _, logits = network(base_inputs)\n loss = criterion(logits, base_targets)\n grads_n = torch.autograd.grad(loss, network.module.get_alphas())\n\n for p, v in zip(network.module.get_weights(), vector):\n p.data.add_(R, v)\n return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]\n\n\ndef backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets):\n # _compute_unrolled_model\n _, logits = network(base_inputs)\n loss = criterion(logits, base_targets)\n LR, WD, momentum = w_optimizer.param_groups[0]['lr'], w_optimizer.param_groups[0]['weight_decay'], w_optimizer.param_groups[0]['momentum']\n with torch.no_grad():\n theta = _concat(network.module.get_weights())\n try:\n moment = _concat(w_optimizer.state[v]['momentum_buffer'] for v in network.module.get_weights())\n moment = moment.mul_(momentum)\n except:\n moment = torch.zeros_like(theta)\n dtheta = _concat(torch.autograd.grad(loss, network.module.get_weights())) + WD*theta\n params = theta.sub(LR, moment+dtheta)\n unrolled_model = deepcopy(network)\n model_dict = unrolled_model.state_dict()\n new_params, offset = {}, 0\n for k, v in network.named_parameters():\n if 'arch_normal_parameters' in k or 'arch_reduce_parameters': continue\n v_length = np.prod(v.size())\n new_params[k] = params[offset: offset+v_length].view(v.size())\n offset += v_length\n model_dict.update(new_params)\n unrolled_model.load_state_dict(model_dict)\n\n unrolled_model.zero_grad()\n _, unrolled_logits = unrolled_model(arch_inputs)\n unrolled_loss = criterion(unrolled_logits, arch_targets)\n unrolled_loss.backward()\n\n dalpha = [v.grad for v in unrolled_model.module.get_alphas()]\n vector = [v.grad.data for v in unrolled_model.module.get_weights()]\n implicit_grads = _hessian_vector_product(vector, network, criterion, base_inputs, base_targets)\n\n for g, ig in zip(dalpha, implicit_grads):\n g.data.sub_(LR, ig.data)\n\n for v, g in zip(network.module.get_alphas(), dalpha):\n if 
v.grad is None:\n v.grad = Variable(g.data)\n else:\n v.grad.data.copy_(g.data)\n\n # if network.module.arch_parameters.grad is None:\n # network.module.arch_parameters.grad = deepcopy( dalpha )\n # else:\n # network.module.arch_parameters.grad.data.copy_( dalpha.data )\n return unrolled_loss.detach(), unrolled_logits.detach()\n \n\ndef search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger):\n data_time, batch_time = AverageMeter(), AverageMeter()\n base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()\n arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()\n network.train()\n end = time.time()\n for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):\n scheduler.update(None, 1.0 * step / len(xloader))\n base_targets = base_targets.cuda(non_blocking=True)\n arch_targets = arch_targets.cuda(non_blocking=True)\n # measure data loading time\n data_time.update(time.time() - end)\n\n # update the architecture-weight\n a_optimizer.zero_grad()\n arch_loss, arch_logits = backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets)\n a_optimizer.step()\n # record\n arch_prec1, arch_prec5 = obtain_accuracy(arch_logits.data, arch_targets.data, topk=(1, 5))\n arch_losses.update(arch_loss.item(), arch_inputs.size(0))\n arch_top1.update (arch_prec1.item(), arch_inputs.size(0))\n arch_top5.update (arch_prec5.item(), arch_inputs.size(0))\n \n # update the weights\n w_optimizer.zero_grad()\n _, logits = network(base_inputs)\n base_loss = criterion(logits, base_targets)\n base_loss.backward()\n torch.nn.utils.clip_grad_norm_(network.parameters(), 5)\n w_optimizer.step()\n # record\n base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))\n base_losses.update(base_loss.item(), base_inputs.size(0))\n base_top1.update (base_prec1.item(), base_inputs.size(0))\n base_top5.update (base_prec5.item(), base_inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if step % print_freq == 0 or step + 1 == len(xloader):\n Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader))\n Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)\n Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)\n Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)\n logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)\n return base_losses.avg, base_top1.avg, base_top5.avg\n\n\ndef valid_func(xloader, network, criterion):\n data_time, batch_time = AverageMeter(), AverageMeter()\n arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()\n network.eval()\n end = time.time()\n with torch.no_grad():\n for step, (arch_inputs, arch_targets) in enumerate(xloader):\n arch_targets = arch_targets.cuda(non_blocking=True)\n # measure data loading time\n data_time.update(time.time() - end)\n # prediction\n _, logits = network(arch_inputs)\n arch_loss = criterion(logits, arch_targets)\n # record\n arch_prec1, arch_prec5 = obtain_accuracy(logits.data, 
arch_targets.data, topk=(1, 5))\n arch_losses.update(arch_loss.item(), arch_inputs.size(0))\n arch_top1.update (arch_prec1.item(), arch_inputs.size(0))\n arch_top5.update (arch_prec5.item(), arch_inputs.size(0))\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n return arch_losses.avg, arch_top1.avg, arch_top5.avg\n\n\ndef main(xargs):\n assert torch.cuda.is_available(), 'CUDA is not available.'\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n torch.set_num_threads( xargs.workers )\n prepare_seed(xargs.rand_seed)\n logger = prepare_logger(args)\n\n train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)\n config = load_config(xargs.config_path, {'class_num': class_num, 'xshape': xshape}, logger)\n search_loader, _, valid_loader = get_nas_search_loaders(train_data, valid_data, xargs.dataset, '/disk1/hw/AutoDL-Projects/configs/nas-benchmark/', config.batch_size, xargs.workers)\n logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))\n logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))\n\n search_space = get_search_spaces('cell', xargs.search_space_name)\n\n if xargs.model_config is None:\n model_config = dict2config({'name': 'DARTS-V2', 'C': xargs.channel, 'N': xargs.num_cells,\n 'max_nodes': xargs.max_nodes, 'num_classes': class_num,\n 'space': search_space,\n 'affine': False, 'track_running_stats': bool(xargs.track_running_stats)}, None)\n else:\n model_config = load_config(xargs.model_config, {'num_classes': class_num, 'space' : search_space,\n 'affine' : False, 'track_running_stats': bool(xargs.track_running_stats)}, None)\n search_model = get_cell_based_tiny_net(model_config)\n logger.log('search-model :\\n{:}'.format(search_model))\n \n w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)\n a_optimizer = torch.optim.Adam(search_model.get_alphas(), lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay)\n logger.log('w-optimizer : {:}'.format(w_optimizer))\n logger.log('a-optimizer : {:}'.format(a_optimizer))\n logger.log('w-scheduler : {:}'.format(w_scheduler))\n logger.log('criterion : {:}'.format(criterion))\n flop, param = get_model_infos(search_model, xshape)\n #logger.log('{:}'.format(search_model))\n logger.log('FLOP = {:.2f} M, Params = {:.2f} MB'.format(flop, param))\n if xargs.arch_nas_dataset is None:\n api = None\n else:\n api = API(xargs.arch_nas_dataset)\n logger.log('{:} create API = {:} done'.format(time_string(), api))\n\n last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')\n network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()\n\n if last_info.exists(): # automatically resume from previous checkpoint\n logger.log(\"=> loading checkpoint of the last-info '{:}' start\".format(last_info))\n last_info = torch.load(last_info)\n start_epoch = last_info['epoch']\n checkpoint = torch.load(last_info['last_checkpoint'])\n genotypes = checkpoint['genotypes']\n valid_accuracies = checkpoint['valid_accuracies']\n search_model.load_state_dict( checkpoint['search_model'] )\n w_scheduler.load_state_dict ( checkpoint['w_scheduler'] )\n w_optimizer.load_state_dict ( checkpoint['w_optimizer'] )\n a_optimizer.load_state_dict ( 
checkpoint['a_optimizer'] )\n logger.log(\"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.\".format(last_info, start_epoch))\n else:\n logger.log(\"=> do not find the last-info file : {:}\".format(last_info))\n start_epoch, valid_accuracies, genotypes = 0, {'best': -1}, {-1: search_model.genotype()}\n\n # start training\n start_time, search_time, epoch_time, total_epoch = time.time(), AverageMeter(), AverageMeter(), config.epochs + config.warmup\n for epoch in range(start_epoch, total_epoch):\n w_scheduler.update(epoch, 0.0)\n need_time = 'Time Left: {:}'.format( convert_secs2time(epoch_time.val * (total_epoch-epoch), True) )\n epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)\n min_LR = min(w_scheduler.get_lr())\n logger.log('\\n[Search the {:}-th epoch] {:}, LR={:}'.format(epoch_str, need_time, min_LR))\n\n search_w_loss, search_w_top1, search_w_top5 = search_func(search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer, epoch_str, xargs.print_freq, logger)\n search_time.update(time.time() - start_time)\n logger.log('[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'.format(epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum))\n valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)\n logger.log('[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))\n # check the best accuracy\n valid_accuracies[epoch] = valid_a_top1\n if valid_a_top1 > valid_accuracies['best']:\n valid_accuracies['best'] = valid_a_top1\n genotypes['best'] = search_model.genotype()\n find_best = True\n else: find_best = False\n\n genotypes[epoch] = search_model.genotype()\n logger.log(\"test3 the number of skip-connect: \" + str(genotypes[epoch]['n_normal']))\n logger.log('<<<--->>> The {:}-th epoch : {:}'.format(epoch_str, genotypes[epoch]))\n # save checkpoint\n save_path = save_checkpoint({'epoch' : epoch + 1,\n 'args' : deepcopy(xargs),\n 'search_model': search_model.state_dict(),\n 'w_optimizer' : w_optimizer.state_dict(),\n 'a_optimizer' : a_optimizer.state_dict(),\n 'w_scheduler' : w_scheduler.state_dict(),\n 'genotypes' : genotypes,\n 'valid_accuracies' : valid_accuracies},\n model_base_path, logger)\n last_info = save_checkpoint({\n 'epoch': epoch + 1,\n 'args' : deepcopy(args),\n 'last_checkpoint': save_path,\n }, logger.path('info'), logger)\n if find_best:\n logger.log('<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.'.format(epoch_str, valid_a_top1))\n copy_checkpoint(model_base_path, model_best_path, logger)\n with torch.no_grad():\n logger.log('{:}'.format(search_model.show_alphas()))\n if api is not None: logger.log('{:}'.format(api.query_by_arch( genotypes[epoch] )))\n # measure elapsed time\n epoch_time.update(time.time() - start_time)\n start_time = time.time()\n\n if genotypes[epoch]['n_normal'] >= 2:\n break\n\n logger.log('\\n' + '-'*100)\n # check the performance from the architecture dataset\n # logger.log('DARTS-V2 : run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(total_epoch, search_time.sum, genotypes[total_epoch-1]))\n logger.log('DARTS-V2 : run {:} epochs, cost {:.1f} s.'.format(total_epoch, search_time.sum))\n # if api is not None: logger.log('{:}'.format( api.query_by_arch(genotypes[total_epoch-1]) ))\n logger.close()\n \n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"DARTS Second Order\")\n 
parser.add_argument('--data_path', type=str, help='Path to dataset')\n  parser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100', 'ImageNet16-120'], help='Choose between Cifar10/100 and ImageNet-16.')\n  # channels and number-of-cells\n  parser.add_argument('--config_path', type=str, help='The config path.')\n  parser.add_argument('--search_space_name', type=str, help='The search space name.')\n  parser.add_argument('--max_nodes', type=int, help='The maximum number of nodes.')\n  parser.add_argument('--channel', type=int, help='The number of channels.')\n  parser.add_argument('--num_cells', type=int, help='The number of cells in one stage.')\n  parser.add_argument('--track_running_stats', type=int, choices=[0, 1], help='Whether to use track_running_stats in the BN layer.')\n  parser.add_argument('--model_config', type=str,\n                      help='The path of the model configuration. When this arg is set, it will cover max_nodes / channels / num_cells.')\n  # architecture learning rate\n  parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')\n  parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')\n  # log\n  parser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')\n  parser.add_argument('--save_dir', type=str, help='Folder to save checkpoints and log.')\n  parser.add_argument('--arch_nas_dataset', type=str, help='The path to load the architecture dataset (tiny-nas-benchmark).')\n  parser.add_argument('--print_freq', type=int, help='print frequency (default: 200)')\n  parser.add_argument('--rand_seed', type=int, help='manual seed')\n  args = parser.parse_args()\n  if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000)\n  main(args)\n",
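_hessian_vector_product above never forms the mixed second-derivative term of the DARTS bilevel gradient explicitly: it nudges the weights to w ± eps*v with eps = r/||v|| and takes a central difference of the architecture gradients. A self-contained numpy check of that recipe on a toy loss L(w, a) = 0.5*(w*a)**2, whose exact mixed derivative times v is 2*w*a*v; all names and the toy loss are ours, standing in for the network loss:

import numpy as np


def grad_alpha(w, a):
    # Analytic d/da of the toy loss L(w, a) = 0.5 * (w * a) ** 2.
    return (w ** 2) * a


def fd_hessian_vector_product(w, a, v, r=1e-2):
    # Mirrors the code above: eps = r / ||v||, then a central difference of
    # the alpha-gradient evaluated at w + eps*v and w - eps*v.
    eps = r / np.abs(v)
    grads_p = grad_alpha(w + eps * v, a)
    grads_n = grad_alpha(w - eps * v, a)
    return (grads_p - grads_n) / (2 * eps)


w, a, v = 0.7, -1.3, 2.0
print(fd_hessian_vector_product(w, a, v))  # ~ -3.64
print(2 * w * a * v)                       # exact value: -3.64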
"##################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #\n############################################################################################\n# NAS-Bench-201: Extending the Scope of Reproducible Neural Architecture Search, ICLR 2020 #\n############################################################################################\n# NAS-Bench-201-v1_0-e61699.pth : 6219 architectures are trained once, 1621 architectures are trained twice, 7785 architectures are trained three times. `LESS` only supports CIFAR10-VALID.\n#\n#\n#\nimport os, sys, copy, random, torch, numpy as np\nfrom collections import OrderedDict, defaultdict\n\n\ndef print_information(information, extra_info=None, show=False):\n dataset_names = information.get_dataset_names()\n strings = [information.arch_str, 'datasets : {:}, extra-info : {:}'.format(dataset_names, extra_info)]\n def metric2str(loss, acc):\n return 'loss = {:.3f}, top1 = {:.2f}%'.format(loss, acc)\n\n for ida, dataset in enumerate(dataset_names):\n #flop, param, latency = information.get_comput_costs(dataset)\n metric = information.get_comput_costs(dataset)\n flop, param, latency = metric['flops'], metric['params'], metric['latency']\n str1 = '{:14s} FLOP={:6.2f} M, Params={:.3f} MB, latency={:} ms.'.format(dataset, flop, param, '{:.2f}'.format(latency*1000) if latency is not None and latency > 0 else None)\n train_info = information.get_metrics(dataset, 'train')\n if dataset == 'cifar10-valid':\n valid_info = information.get_metrics(dataset, 'x-valid')\n str2 = '{:14s} train : [{:}], valid : [{:}]'.format(dataset, metric2str(train_info['loss'], train_info['accuracy']), metric2str(valid_info['loss'], valid_info['accuracy']))\n elif dataset == 'cifar10':\n test__info = information.get_metrics(dataset, 'ori-test')\n str2 = '{:14s} train : [{:}], test : [{:}]'.format(dataset, metric2str(train_info['loss'], train_info['accuracy']), metric2str(test__info['loss'], test__info['accuracy']))\n else:\n valid_info = information.get_metrics(dataset, 'x-valid')\n test__info = information.get_metrics(dataset, 'x-test')\n str2 = '{:14s} train : [{:}], valid : [{:}], test : [{:}]'.format(dataset, metric2str(train_info['loss'], train_info['accuracy']), metric2str(valid_info['loss'], valid_info['accuracy']), metric2str(test__info['loss'], test__info['accuracy']))\n strings += [str1, str2]\n if show: print('\\n'.join(strings))\n return strings\n\n\nclass NASBench201API(object):\n\n def __init__(self, file_path_or_dict, verbose=True):\n if isinstance(file_path_or_dict, str):\n if verbose: print('try to create the NAS-Bench-201 api from {:}'.format(file_path_or_dict))\n assert os.path.isfile(file_path_or_dict), 'invalid path : {:}'.format(file_path_or_dict)\n file_path_or_dict = torch.load(file_path_or_dict)\n elif isinstance(file_path_or_dict, dict):\n file_path_or_dict = copy.deepcopy( file_path_or_dict )\n else: raise ValueError('invalid type : {:} not in [str, dict]'.format(type(file_path_or_dict)))\n assert isinstance(file_path_or_dict, dict), 'It should be a dict instead of {:}'.format(type(file_path_or_dict))\n keys = ('meta_archs', 'arch2infos', 'evaluated_indexes')\n for key in keys: assert key in file_path_or_dict, 'Can not find key[{:}] in the dict'.format(key)\n self.meta_archs = copy.deepcopy( file_path_or_dict['meta_archs'] )\n self.arch2infos_less = OrderedDict()\n self.arch2infos_full = OrderedDict()\n for xkey in sorted(list(file_path_or_dict['arch2infos'].keys())):\n all_info = 
file_path_or_dict['arch2infos'][xkey]\n self.arch2infos_less[xkey] = ArchResults.create_from_state_dict( all_info['less'] )\n self.arch2infos_full[xkey] = ArchResults.create_from_state_dict( all_info['full'] )\n self.evaluated_indexes = sorted(list(file_path_or_dict['evaluated_indexes']))\n self.archstr2index = {}\n for idx, arch in enumerate(self.meta_archs):\n #assert arch.tostr() not in self.archstr2index, 'This [{:}]-th arch {:} already in the dict ({:}).'.format(idx, arch, self.archstr2index[arch.tostr()])\n assert arch not in self.archstr2index, 'This [{:}]-th arch {:} already in the dict ({:}).'.format(idx, arch, self.archstr2index[arch])\n self.archstr2index[ arch ] = idx\n\n def __getitem__(self, index):\n return copy.deepcopy( self.meta_archs[index] )\n\n def __len__(self):\n return len(self.meta_archs)\n\n def __repr__(self):\n return ('{name}({num}/{total} architectures)'.format(name=self.__class__.__name__, num=len(self.evaluated_indexes), total=len(self.meta_archs)))\n\n def random(self):\n return random.randint(0, len(self.meta_archs)-1)\n\n # This function is used to query the index of an architecture in the search space.\n # The input arch can be an architecture string such as '|nor_conv_3x3~0|+|nor_conv_3x3~0|avg_pool_3x3~1|+|skip_connect~0|nor_conv_3x3~1|skip_connect~2|'\n # or an instance that has the 'tostr' function that can generate the architecture string.\n # This function will return the index.\n # If return -1, it means this architecture is not in the search space.\n # Otherwise, it will return an int in [0, the-number-of-candidates-in-the-search-space).\n def query_index_by_arch(self, arch):\n if isinstance(arch, str):\n if arch in self.archstr2index: arch_index = self.archstr2index[ arch ]\n else : arch_index = -1\n elif hasattr(arch, 'tostr'):\n if arch.tostr() in self.archstr2index: arch_index = self.archstr2index[ arch.tostr() ]\n else : arch_index = -1\n else: arch_index = -1\n return arch_index\n\n # Overwrite all information of the 'index'-th architecture in the search space.\n # It will load its data from 'archive_root'.\n def reload(self, archive_root, index):\n assert os.path.isdir(archive_root), 'invalid directory : {:}'.format(archive_root)\n xfile_path = os.path.join(archive_root, '{:06d}-FULL.pth'.format(index))\n assert 0 <= index < len(self.meta_archs), 'invalid index of {:}'.format(index)\n assert os.path.isfile(xfile_path), 'invalid data path : {:}'.format(xfile_path)\n xdata = torch.load(xfile_path)\n assert isinstance(xdata, dict) and 'full' in xdata and 'less' in xdata, 'invalid format of data in {:}'.format(xfile_path)\n self.arch2infos_less[index] = ArchResults.create_from_state_dict( xdata['less'] )\n self.arch2infos_full[index] = ArchResults.create_from_state_dict( xdata['full'] )\n \n # This function is used to query the information of a specific archiitecture\n # 'arch' can be an architecture index or an architecture string\n # When use_12epochs_result=True, the hyper-parameters used to train a model are in 'configs/nas-benchmark/CIFAR.config'\n # When use_12epochs_result=False, the hyper-parameters used to train a model are in 'configs/nas-benchmark/LESS.config'\n # The difference between these two configurations are the number of training epochs, which is 200 in CIFAR.config and 12 in LESS.config.\n def query_by_arch(self, arch, use_12epochs_result=False):\n if isinstance(arch, int):\n arch_index = arch\n else:\n arch_index = self.query_index_by_arch(arch)\n if arch_index == -1: return None # the following two lines are used to 
support few training epochs\n if use_12epochs_result: arch2infos = self.arch2infos_less\n else : arch2infos = self.arch2infos_full\n if arch_index in arch2infos:\n strings = print_information(arch2infos[ arch_index ], 'arch-index={:}'.format(arch_index))\n return '\\n'.join(strings)\n else:\n print ('Find this arch-index : {:}, but this arch is not evaluated.'.format(arch_index))\n return None\n\n # This 'query_by_index' function is used to query information with the training of 12 epochs or 200 epochs.\n # ------\n # If use_12epochs_result=True, we train the model by 12 epochs (see config in configs/nas-benchmark/LESS.config)\n # If use_12epochs_result=False, we train the model by 200 epochs (see config in configs/nas-benchmark/CIFAR.config)\n # ------\n # If dataname is None, return the ArchResults\n # else, return a dict with all trials on that dataset (the key is the seed)\n # Options are 'cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120'.\n # -- cifar10-valid : training the model on the CIFAR-10 training set.\n # -- cifar10 : training the model on the CIFAR-10 training + validation set.\n # -- cifar100 : training the model on the CIFAR-100 training set.\n # -- ImageNet16-120 : training the model on the ImageNet16-120 training set.\n def query_by_index(self, arch_index, dataname=None, use_12epochs_result=False):\n if use_12epochs_result: basestr, arch2infos = '12epochs' , self.arch2infos_less\n else : basestr, arch2infos = '200epochs', self.arch2infos_full\n assert arch_index in arch2infos, 'arch_index [{:}] does not in arch2info with {:}'.format(arch_index, basestr)\n archInfo = copy.deepcopy( arch2infos[ arch_index ] )\n if dataname is None: return archInfo\n else:\n assert dataname in archInfo.get_dataset_names(), 'invalid dataset-name : {:}'.format(dataname)\n info = archInfo.query(dataname)\n return info\n\n def query_meta_info_by_index(self, arch_index, use_12epochs_result=False):\n if use_12epochs_result: basestr, arch2infos = '12epochs' , self.arch2infos_less\n else : basestr, arch2infos = '200epochs', self.arch2infos_full\n assert arch_index in arch2infos, 'arch_index [{:}] does not in arch2info with {:}'.format(arch_index, basestr)\n archInfo = copy.deepcopy( arch2infos[ arch_index ] )\n return archInfo\n\n def find_best(self, dataset, metric_on_set, FLOP_max=None, Param_max=None, use_12epochs_result=False):\n if use_12epochs_result: basestr, arch2infos = '12epochs' , self.arch2infos_less\n else : basestr, arch2infos = '200epochs', self.arch2infos_full\n best_index, highest_accuracy = -1, None\n for i, idx in enumerate(self.evaluated_indexes):\n info = arch2infos[idx].get_comput_costs(dataset)\n flop, param, latency = info['flops'], info['params'], info['latency']\n if FLOP_max is not None and flop > FLOP_max : continue\n if Param_max is not None and param > Param_max: continue\n xinfo = arch2infos[idx].get_metrics(dataset, metric_on_set)\n loss, accuracy = xinfo['loss'], xinfo['accuracy']\n if best_index == -1:\n best_index, highest_accuracy = idx, accuracy\n elif highest_accuracy < accuracy:\n best_index, highest_accuracy = idx, accuracy\n return best_index, highest_accuracy\n\n # return the topology structure of the `index`-th architecture\n def arch(self, index):\n assert 0 <= index < len(self.meta_archs), 'invalid index : {:} vs. 
{:}.'.format(index, len(self.meta_archs))\n return copy.deepcopy(self.meta_archs[index])\n\n \"\"\"\n This function is used to obtain the trained weights of the `index`-th architecture on `dataset` with the seed of `seed`\n Args [seed]:\n -- None : return a dict containing the trained weights of all trials, where each key is a seed and its corresponding value is the weights.\n -- a interger : return the weights of a specific trial, whose seed is this interger.\n Args [use_12epochs_result]:\n -- True : train the model by 12 epochs\n -- False : train the model by 200 epochs\n \"\"\"\n def get_net_param(self, index, dataset, seed, use_12epochs_result=False):\n if use_12epochs_result: basestr, arch2infos = '12epochs' , self.arch2infos_less\n else : basestr, arch2infos = '200epochs', self.arch2infos_full\n archresult = arch2infos[index]\n return archresult.get_net_param(dataset, seed)\n \n \"\"\"\n This function is used to obtain the configuration for the `index`-th architecture on `dataset`.\n Args [dataset] (4 possible options):\n -- cifar10-valid : training the model on the CIFAR-10 training set.\n -- cifar10 : training the model on the CIFAR-10 training + validation set.\n -- cifar100 : training the model on the CIFAR-100 training set.\n -- ImageNet16-120 : training the model on the ImageNet16-120 training set.\n This function will return a dict.\n ========= Some examlpes for using this function:\n config = api.get_net_config(128, 'cifar10')\n \"\"\"\n def get_net_config(self, index, dataset):\n archresult = self.arch2infos_full[index]\n all_results = archresult.query(dataset, None)\n if len(all_results) == 0: raise ValueError('can not find one valid trial for the {:}-th architecture on {:}'.format(index, dataset))\n for seed, result in all_results.items():\n return result.get_config(None)\n #print ('SEED [{:}] : {:}'.format(seed, result))\n raise ValueError('Impossible to reach here!')\n\n # obtain the cost metric for the `index`-th architecture on a dataset\n def get_cost_info(self, index, dataset, use_12epochs_result=False):\n if use_12epochs_result: basestr, arch2infos = '12epochs' , self.arch2infos_less\n else : basestr, arch2infos = '200epochs', self.arch2infos_full\n archresult = arch2infos[index]\n return archresult.get_comput_costs(dataset)\n\n # obtain the metric for the `index`-th architecture\n # `dataset` indicates the dataset:\n # 'cifar10-valid' : using the proposed train set of CIFAR-10 as the training set\n # 'cifar10' : using the proposed train+valid set of CIFAR-10 as the training set\n # 'cifar100' : using the proposed train set of CIFAR-100 as the training set\n # 'ImageNet16-120' : using the proposed train set of ImageNet-16-120 as the training set\n # `iepoch` indicates the index of training epochs from 0 to 11/199.\n # When iepoch=None, it will return the metric for the last training epoch\n # When iepoch=11, it will return the metric for the 11-th training epoch (starting from 0)\n # `use_12epochs_result` indicates different hyper-parameters for training\n # When use_12epochs_result=True, it trains the network with 12 epochs and the LR decayed from 0.1 to 0 within 12 epochs\n # When use_12epochs_result=False, it trains the network with 200 epochs and the LR decayed from 0.1 to 0 within 200 epochs\n # `is_random`\n # When is_random=True, the performance of a random architecture will be returned\n # When is_random=False, the performanceo of all trials will be averaged.\n def get_more_info(self, index, dataset, iepoch=None, use_12epochs_result=False, is_random=True):\n 
if use_12epochs_result: basestr, arch2infos = '12epochs' , self.arch2infos_less\n    else: basestr, arch2infos = '200epochs', self.arch2infos_full\n    archresult = arch2infos[index]\n    # if a random trial is requested, pick its seed first\n    if isinstance(is_random, bool) and is_random:\n      seeds = archresult.get_dataset_seeds(dataset)\n      is_random = random.choice(seeds)\n    if dataset == 'cifar10-valid':\n      train_info = archresult.get_metrics(dataset, 'train', iepoch=iepoch, is_random=is_random)\n      valid_info = archresult.get_metrics(dataset, 'x-valid', iepoch=iepoch, is_random=is_random)\n      try:\n        test__info = archresult.get_metrics(dataset, 'ori-test', iepoch=iepoch, is_random=is_random)\n      except:\n        test__info = None\n      total = train_info['iepoch'] + 1\n      xifo = {'train-loss'    : train_info['loss'],\n              'train-accuracy': train_info['accuracy'],\n              'train-per-time': None if train_info['all_time'] is None else train_info['all_time'] / total,\n              'train-all-time': train_info['all_time'],\n              'valid-loss'    : valid_info['loss'],\n              'valid-accuracy': valid_info['accuracy'],\n              'valid-all-time': valid_info['all_time'],\n              'valid-per-time': None if valid_info['all_time'] is None else valid_info['all_time'] / total}\n      if test__info is not None:\n        xifo['test-loss'] = test__info['loss']\n        xifo['test-accuracy'] = test__info['accuracy']\n      return xifo\n    else:\n      train_info = archresult.get_metrics(dataset, 'train', iepoch=iepoch, is_random=is_random)\n      try:\n        if dataset == 'cifar10':\n          test__info = archresult.get_metrics(dataset, 'ori-test', iepoch=iepoch, is_random=is_random)\n        else:\n          test__info = archresult.get_metrics(dataset, 'x-test', iepoch=iepoch, is_random=is_random)\n      except:\n        test__info = None\n      try:\n        valid_info = archresult.get_metrics(dataset, 'x-valid', iepoch=iepoch, is_random=is_random)\n      except:\n        valid_info = None\n      try:\n        est_valid_info = archresult.get_metrics(dataset, 'ori-test', iepoch=iepoch, is_random=is_random)\n      except:\n        est_valid_info = None\n      xifo = {'train-loss'    : train_info['loss'],\n              'train-accuracy': train_info['accuracy']}\n      if test__info is not None:\n        xifo['test-loss'] = test__info['loss']\n        xifo['test-accuracy'] = test__info['accuracy']\n      if valid_info is not None:\n        xifo['valid-loss'] = valid_info['loss']\n        xifo['valid-accuracy'] = valid_info['accuracy']\n      if est_valid_info is not None:\n        xifo['est-valid-loss'] = est_valid_info['loss']\n        xifo['est-valid-accuracy'] = est_valid_info['accuracy']\n      return xifo\n\n  \"\"\"\n  This function will print the information of a specific (or all) architecture(s).\n  If the index < 0: it will loop for all architectures and print their information one by one.\n  else: it will print the information of the 'index'-th architecture.\n  \"\"\"\n  def show(self, index=-1):\n    if index < 0: # show all architectures\n      print(self)\n      for i, idx in enumerate(self.evaluated_indexes):\n        print('\\n' + '-' * 10 + ' The ({:5d}/{:5d}) {:06d}-th architecture! 
'.format(i, len(self.evaluated_indexes), idx) + '-'*10)\n print('arch : {:}'.format(self.meta_archs[idx]))\n strings = print_information(self.arch2infos_full[idx])\n print('>' * 40 + ' {:03d} epochs '.format(self.arch2infos_full[idx].get_total_epoch()) + '>' * 40)\n print('\\n'.join(strings))\n strings = print_information(self.arch2infos_less[idx])\n print('>' * 40 + ' {:03d} epochs '.format(self.arch2infos_less[idx].get_total_epoch()) + '>' * 40)\n print('\\n'.join(strings))\n print('<' * 40 + '------------' + '<' * 40)\n else:\n if 0 <= index < len(self.meta_archs):\n if index not in self.evaluated_indexes: print('The {:}-th architecture has not been evaluated or saved.'.format(index))\n else:\n strings = print_information(self.arch2infos_full[index])\n print('>' * 40 + ' {:03d} epochs '.format(self.arch2infos_full[index].get_total_epoch()) + '>' * 40)\n print('\\n'.join(strings))\n strings = print_information(self.arch2infos_less[index])\n print('>' * 40 + ' {:03d} epochs '.format(self.arch2infos_less[index].get_total_epoch()) + '>' * 40)\n print('\\n'.join(strings))\n print('<' * 40 + '------------' + '<' * 40)\n else:\n print('This index ({:}) is out of range (0~{:}).'.format(index, len(self.meta_archs)))\n\n # This func shows how to read the string-based architecture encoding\n # the same as the `str2structure` func in `AutoDL-Projects/lib/models/cell_searchs/genotypes.py`\n # Usage:\n # arch = api.str2lists( '|nor_conv_1x1~0|+|none~0|none~1|+|none~0|none~1|skip_connect~2|' )\n # print ('there are {:} nodes in this arch'.format(len(arch)+1)) # arch is a list\n # for i, node in enumerate(arch):\n # print('the {:}-th node is the sum of these {:} nodes with op: {:}'.format(i+1, len(node), node))\n @staticmethod\n def str2lists(xstr):\n assert isinstance(xstr, str), 'must take string (not {:}) as input'.format(type(xstr))\n nodestrs = xstr.split('+')\n genotypes = []\n for i, node_str in enumerate(nodestrs):\n inputs = list(filter(lambda x: x != '', node_str.split('|')))\n for xinput in inputs: assert len(xinput.split('~')) == 2, 'invalid input length : {:}'.format(xinput)\n inputs = ( xi.split('~') for xi in inputs )\n input_infos = tuple( (op, int(IDX)) for (op, IDX) in inputs)\n genotypes.append( input_infos )\n return genotypes\n\n # This func shows how to convert the string-based architecture encoding to the encoding strategy in NAS-Bench-101\n # Usage:\n # # this will return a numpy matrix (2-D np.array)\n # matrix = api.str2matrix( '|nor_conv_1x1~0|+|none~0|none~1|+|none~0|none~1|skip_connect~2|' )\n # # This matrix is a 4-by-4 matrix representing a cell with 4 nodes (only the lower-left triangle is useful).\n # [ [0, 0, 0, 0], # the first row represents the input (0-th) node\n # [2, 0, 0, 0], # the second row represents the 1-st node, computed as 2-th-op( 0-th-node )\n # [0, 0, 0, 0], # the third row represents the 2-nd node, computed as 0-th-op( 0-th-node ) + 0-th-op( 1-th-node )\n # [0, 0, 1, 0] ] # the fourth row represents the 3-rd node, computed as 0-th-op( 0-th-node ) + 0-th-op( 1-th-node ) + 1-th-op( 2-th-node )\n # In the NAS-Bench-201 search space, 0-th-op is 'none', 1-th-op is 'skip_connect',\n # 2-th-op is 'nor_conv_1x1', 3-th-op is 'nor_conv_3x3', 4-th-op is 'avg_pool_3x3'.\n @staticmethod\n def str2matrix(xstr):\n assert isinstance(xstr, str), 'must take string (not {:}) as input'.format(type(xstr))\n # this only supports the NAS-Bench-201 search space\n # this definition will be consistent with this line 
https://github.com/D-X-Y/AutoDL-Projects/blob/master/lib/models/cell_operations.py#L24\n # If a node has two input-edges from the same node, this function does not work. One edge will be overwritten.\n NAS_BENCH_201 = ['none', 'skip_connect', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3']\n nodestrs = xstr.split('+')\n num_nodes = len(nodestrs) + 1\n matrix = np.zeros((num_nodes,num_nodes))\n for i, node_str in enumerate(nodestrs):\n inputs = list(filter(lambda x: x != '', node_str.split('|')))\n for xinput in inputs: assert len(xinput.split('~')) == 2, 'invalid input length : {:}'.format(xinput)\n for xi in inputs:\n op, idx = xi.split('~')\n if op not in NAS_BENCH_201: raise ValueError('this op ({:}) is not in {:}'.format(op, NAS_BENCH_201))\n op_idx, node_idx = NAS_BENCH_201.index(op), int(idx)\n matrix[i+1, node_idx] = op_idx\n return matrix\n\n\n\n\nclass ArchResults(object):\n\n def __init__(self, arch_index, arch_str):\n self.arch_index = int(arch_index)\n self.arch_str = copy.deepcopy(arch_str)\n self.all_results = dict()\n self.dataset_seed = dict()\n self.clear_net_done = False\n\n def get_comput_costs(self, dataset):\n x_seeds = self.dataset_seed[dataset]\n results = [self.all_results[ (dataset, seed) ] for seed in x_seeds]\n\n flops = [result.flop for result in results]\n params = [result.params for result in results]\n latencies = [result.get_latency() for result in results]\n latencies = [x for x in latencies if x > 0]\n mean_latency = np.mean(latencies) if len(latencies) > 0 else None\n time_infos = defaultdict(list)\n for result in results:\n time_info = result.get_times()\n for key, value in time_info.items(): time_infos[key].append( value )\n \n info = {'flops' : np.mean(flops),\n 'params' : np.mean(params),\n 'latency': mean_latency}\n for key, value in time_infos.items():\n if len(value) > 0 and value[0] is not None:\n info[key] = np.mean(value)\n else: info[key] = None\n return info\n\n \"\"\"\n This `get_metrics` function is used to obtain the loss, accuracy, etc. information on a specific dataset.\n If not specified, each set refers to the proposed split in the NAS-Bench-201 paper.\n If some args return None or raise an error, then the metric is not available.\n ========================================\n Args [dataset] (4 possible options):\n -- cifar10-valid : training the model on the CIFAR-10 training set.\n -- cifar10 : training the model on the CIFAR-10 training + validation set.\n -- cifar100 : training the model on the CIFAR-100 training set.\n -- ImageNet16-120 : training the model on the ImageNet16-120 training set.\n Args [setname] (each dataset has different setnames):\n -- When dataset = cifar10-valid, you can use 'train', 'x-valid', 'ori-test'\n ------ 'train' : the metric on the training set.\n ------ 'x-valid' : the metric on the validation set.\n ------ 'ori-test' : the metric on the test set.\n -- When dataset = cifar10, you can use 'train', 'ori-test'.\n ------ 'train' : the metric on the training + validation set.\n ------ 'ori-test' : the metric on the test set.\n -- When dataset = cifar100 or ImageNet16-120, you can use 'train', 'ori-test', 'x-valid', 'x-test'\n ------ 'train' : the metric on the training set.\n ------ 'x-valid' : the metric on the validation set.\n ------ 'x-test' : the metric on the test set.\n ------ 'ori-test' : the metric on the validation + test set.\n Args [iepoch] (None or an integer in [0, the-number-of-total-training-epochs)):\n ------ None : return the metric after the last training epoch.\n ------ an integer i : return the 
metric after the i-th training epoch.\n Args [is_random]:\n ------ True : return the metric of a randomly selected trial.\n ------ False : return the averaged metric of all available trials.\n ------ an integer indicating the 'seed' value : return the metric of a specific trial (whose random seed is 'is_random').\n \"\"\"\n def get_metrics(self, dataset, setname, iepoch=None, is_random=False):\n x_seeds = self.dataset_seed[dataset]\n results = [self.all_results[ (dataset, seed) ] for seed in x_seeds]\n infos = defaultdict(list)\n for result in results:\n if setname == 'train':\n info = result.get_train(iepoch)\n else:\n info = result.get_eval(setname, iepoch)\n for key, value in info.items(): infos[key].append( value )\n return_info = dict()\n if isinstance(is_random, bool) and is_random: # randomly select one\n index = random.randint(0, len(results)-1)\n for key, value in infos.items(): return_info[key] = value[index]\n elif isinstance(is_random, bool) and not is_random: # average\n for key, value in infos.items():\n if len(value) > 0 and value[0] is not None:\n return_info[key] = np.mean(value)\n else: return_info[key] = None\n elif isinstance(is_random, int): # specify the seed\n if is_random not in x_seeds: raise ValueError('can not find random seed ({:}) from {:}'.format(is_random, x_seeds))\n index = x_seeds.index(is_random)\n for key, value in infos.items(): return_info[key] = value[index]\n else:\n raise ValueError('invalid value for is_random: {:}'.format(is_random))\n return return_info\n\n def show(self, is_print=False):\n return print_information(self, None, is_print)\n\n def get_dataset_names(self):\n return list(self.dataset_seed.keys())\n\n def get_dataset_seeds(self, dataset):\n return copy.deepcopy( self.dataset_seed[dataset] )\n\n \"\"\"\n This function will return the trained network's weights on the 'dataset'.\n When the 'seed' is None, it will return the weights for every trial in the form of a dict.\n When the 'seed' is specified, it will return the weights of the trial whose random seed is 'seed'.\n \"\"\"\n def get_net_param(self, dataset, seed=None):\n if seed is None:\n x_seeds = self.dataset_seed[dataset]\n return {seed: self.all_results[(dataset, seed)].get_net_param() for seed in x_seeds}\n else:\n return self.all_results[(dataset, seed)].get_net_param()\n\n # get the total number of training epochs\n def get_total_epoch(self, dataset=None):\n if dataset is None:\n epochss = []\n for xdata, x_seeds in self.dataset_seed.items():\n epochss += [self.all_results[(xdata, seed)].get_total_epoch() for seed in x_seeds]\n elif isinstance(dataset, str):\n x_seeds = self.dataset_seed[dataset]\n epochss = [self.all_results[(dataset, seed)].get_total_epoch() for seed in x_seeds]\n else:\n raise ValueError('invalid dataset={:}'.format(dataset))\n if len(set(epochss)) > 1: raise ValueError('Each trial must have the same number of training epochs : {:}'.format(epochss))\n return epochss[-1]\n\n # return the ResultsCount object (containing all information of a single trial) for 'dataset' and 'seed'\n def query(self, dataset, seed=None):\n if seed is None:\n x_seeds = self.dataset_seed[dataset]\n return {seed: self.all_results[ (dataset, seed) ] for seed in x_seeds}\n else:\n return self.all_results[ (dataset, seed) ]\n\n def arch_idx_str(self):\n return '{:06d}'.format(self.arch_index)\n\n def update(self, dataset_name, seed, result):\n if dataset_name not in self.dataset_seed:\n self.dataset_seed[dataset_name] = []\n assert seed not in self.dataset_seed[dataset_name], '{:}-th arch already has this seed ({:}) on {:}'.format(self.arch_index, seed, 
dataset_name)\n self.dataset_seed[ dataset_name ].append( seed )\n self.dataset_seed[ dataset_name ] = sorted( self.dataset_seed[ dataset_name ] )\n assert (dataset_name, seed) not in self.all_results\n self.all_results[ (dataset_name, seed) ] = result\n self.clear_net_done = False\n\n def state_dict(self):\n state_dict = dict()\n for key, value in self.__dict__.items():\n if key == 'all_results': # contain the class of ResultsCount\n xvalue = dict()\n assert isinstance(value, dict), 'invalid type of value for {:} : {:}'.format(key, type(value))\n for _k, _v in value.items():\n assert isinstance(_v, ResultsCount), 'invalid type of value for {:}/{:} : {:}'.format(key, _k, type(_v))\n xvalue[_k] = _v.state_dict()\n else:\n xvalue = value\n state_dict[key] = xvalue\n return state_dict\n\n def load_state_dict(self, state_dict):\n new_state_dict = dict()\n for key, value in state_dict.items():\n if key == 'all_results': # to convert to the class of ResultsCount\n xvalue = dict()\n assert isinstance(value, dict), 'invalid type of value for {:} : {:}'.format(key, type(value))\n for _k, _v in value.items():\n xvalue[_k] = ResultsCount.create_from_state_dict(_v)\n else: xvalue = value\n new_state_dict[key] = xvalue\n self.__dict__.update(new_state_dict)\n\n @staticmethod\n def create_from_state_dict(state_dict_or_file):\n x = ArchResults(-1, -1)\n if isinstance(state_dict_or_file, str): # a file path\n state_dict = torch.load(state_dict_or_file)\n elif isinstance(state_dict_or_file, dict):\n state_dict = state_dict_or_file\n else:\n raise ValueError('invalid type of state_dict_or_file : {:}'.format(type(state_dict_or_file)))\n x.load_state_dict(state_dict)\n return x\n\n # This function is used to clear the weights saved in each 'result'\n # This can help reduce the memory footprint.\n def clear_params(self):\n for key, result in self.all_results.items():\n result.net_state_dict = None\n self.clear_net_done = True \n\n def __repr__(self):\n return ('{name}(arch-index={index}, arch={arch}, {num} runs, clear={clear})'.format(name=self.__class__.__name__, index=self.arch_index, arch=self.arch_str, num=len(self.all_results), clear=self.clear_net_done))\n \n\n\n\"\"\"\nThis class (ResultsCount) is used to save the information of one trial for a single architecture.\nI did not write much comment for this class, because it is the lowest-level class in NAS-Bench-201 API, which will be rarely called.\nIf you have any question regarding this class, please open an issue or email me.\n\"\"\"\nclass ResultsCount(object):\n\n def __init__(self, name, state_dict, train_accs, train_losses, params, flop, arch_config, seed, epochs, latency):\n self.name = name\n self.net_state_dict = state_dict\n self.train_acc1es = copy.deepcopy(train_accs)\n self.train_acc5es = None\n self.train_losses = copy.deepcopy(train_losses)\n self.train_times = None\n self.arch_config = copy.deepcopy(arch_config)\n self.params = params\n self.flop = flop\n self.seed = seed\n self.epochs = epochs\n self.latency = latency\n # evaluation results\n self.reset_eval()\n\n def update_train_info(self, train_acc1es, train_acc5es, train_losses, train_times):\n self.train_acc1es = train_acc1es\n self.train_acc5es = train_acc5es\n self.train_losses = train_losses\n self.train_times = train_times\n\n def reset_eval(self):\n self.eval_names = []\n self.eval_acc1es = {}\n self.eval_times = {}\n self.eval_losses = {}\n\n def update_latency(self, latency):\n self.latency = copy.deepcopy( latency )\n\n def update_eval(self, accs, losses, times): # new 
version\n data_names = set([x.split('@')[0] for x in accs.keys()])\n for data_name in data_names:\n assert data_name not in self.eval_names, '{:} has already been added into eval-names'.format(data_name)\n self.eval_names.append( data_name )\n for iepoch in range(self.epochs):\n xkey = '{:}@{:}'.format(data_name, iepoch)\n self.eval_acc1es[ xkey ] = accs[ xkey ]\n self.eval_losses[ xkey ] = losses[ xkey ]\n self.eval_times [ xkey ] = times[ xkey ]\n\n def update_OLD_eval(self, name, accs, losses): # old version\n assert name not in self.eval_names, '{:} has already added'.format(name)\n self.eval_names.append( name )\n for iepoch in range(self.epochs):\n if iepoch in accs:\n self.eval_acc1es['{:}@{:}'.format(name,iepoch)] = accs[iepoch]\n self.eval_losses['{:}@{:}'.format(name,iepoch)] = losses[iepoch]\n\n def __repr__(self):\n num_eval = len(self.eval_names)\n set_name = '[' + ', '.join(self.eval_names) + ']'\n return ('{name}({xname}, arch={arch}, FLOP={flop:.2f}M, Param={param:.3f}MB, seed={seed}, {num_eval} eval-sets: {set_name})'.format(name=self.__class__.__name__, xname=self.name, arch=self.arch_config['arch_str'], flop=self.flop, param=self.params, seed=self.seed, num_eval=num_eval, set_name=set_name))\n\n # get the total number of training epochs\n def get_total_epoch(self):\n return copy.deepcopy(self.epochs)\n \n # get the latency\n # -1 represents not avaliable ; otherwise it should be a float value\n def get_latency(self):\n if self.latency is None: return -1\n else: return sum(self.latency) / len(self.latency)\n\n # get the information regarding time\n def get_times(self):\n if self.train_times is not None and isinstance(self.train_times, dict):\n train_times = list( self.train_times.values() )\n time_info = {'T-train@epoch': np.mean(train_times), 'T-train@total': np.sum(train_times)}\n for name in self.eval_names:\n xtimes = [self.eval_times['{:}@{:}'.format(name,i)] for i in range(self.epochs)]\n time_info['T-{:}@epoch'.format(name)] = np.mean(xtimes)\n time_info['T-{:}@total'.format(name)] = np.sum(xtimes)\n else:\n time_info = {'T-train@epoch': None, 'T-train@total': None }\n for name in self.eval_names:\n time_info['T-{:}@epoch'.format(name)] = None\n time_info['T-{:}@total'.format(name)] = None\n return time_info\n\n def get_eval_set(self):\n return self.eval_names\n\n # get the training information\n def get_train(self, iepoch=None):\n if iepoch is None: iepoch = self.epochs-1\n assert 0 <= iepoch < self.epochs, 'invalid iepoch={:} < {:}'.format(iepoch, self.epochs)\n if self.train_times is not None:\n xtime = self.train_times[iepoch]\n atime = sum([self.train_times[i] for i in range(iepoch+1)])\n else: xtime, atime = None, None\n return {'iepoch' : iepoch,\n 'loss' : self.train_losses[iepoch],\n 'accuracy': self.train_acc1es[iepoch],\n 'cur_time': xtime,\n 'all_time': atime}\n\n # get the evaluation information ; there could be multiple evaluation sets (identified by the 'name' argument).\n def get_eval(self, name, iepoch=None):\n if iepoch is None: iepoch = self.epochs-1\n assert 0 <= iepoch < self.epochs, 'invalid iepoch={:} < {:}'.format(iepoch, self.epochs)\n if isinstance(self.eval_times,dict) and len(self.eval_times) > 0:\n xtime = self.eval_times['{:}@{:}'.format(name,iepoch)]\n atime = sum([self.eval_times['{:}@{:}'.format(name,i)] for i in range(iepoch+1)])\n else: xtime, atime = None, None\n return {'iepoch' : iepoch,\n 'loss' : self.eval_losses['{:}@{:}'.format(name,iepoch)],\n 'accuracy': self.eval_acc1es['{:}@{:}'.format(name,iepoch)],\n 'cur_time': 
xtime,\n 'all_time': atime}\n\n def get_net_param(self):\n return self.net_state_dict\n\n # This function is used to obtain the config dict for this architecture.\n def get_config(self, str2structure):\n if str2structure is None:\n return {'name': 'infer.tiny', 'C': self.arch_config['channel'], \\\n 'N' : self.arch_config['num_cells'], \\\n 'arch_str': self.arch_config['arch_str'], 'num_classes': self.arch_config['class_num']}\n else:\n return {'name': 'infer.tiny', 'C': self.arch_config['channel'], \\\n 'N' : self.arch_config['num_cells'], \\\n 'genotype': str2structure(self.arch_config['arch_str']), 'num_classes': self.arch_config['class_num']}\n\n def state_dict(self):\n _state_dict = {key: value for key, value in self.__dict__.items()}\n return _state_dict\n\n def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)\n\n @staticmethod\n def create_from_state_dict(state_dict):\n x = ResultsCount(None, None, None, None, None, None, None, None, None, None)\n x.load_state_dict(state_dict)\n return x\n"
] | [
[
"torch.load",
"torch.utils.data.DataLoader",
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.set_num_threads",
"torch.save"
],
[
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"torch.load",
"torch.zeros_like",
"torch.no_grad",
"torch.set_num_threads",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.autograd.Variable"
],
[
"numpy.sum",
"numpy.zeros",
"numpy.mean",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
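For context on how the NAS-Bench-201 accessors documented in the code above compose, here is a minimal usage sketch in Python. It assumes an already-constructed API object named `api` (the constructor and the benchmark file it loads are outside this excerpt), and the architecture index 128 is purely illustrative:

# Minimal usage sketch for the accessors shown above; `api` is assumed
# to be an already-loaded API instance, and `index` is illustrative.
index = 128

# Metrics averaged over all trials of the 200-epoch runs on CIFAR-10 (valid split).
info = api.get_more_info(index, 'cifar10-valid', iepoch=None,
                         use_12epochs_result=False, is_random=False)
print(info['valid-accuracy'], info['train-all-time'])

# Config dict suitable for re-instantiating the network.
config = api.get_net_config(index, 'cifar10')

# Decode the string-based cell encoding (str2lists is a @staticmethod).
arch = api.str2lists('|nor_conv_1x1~0|+|none~0|none~1|+|none~0|none~1|skip_connect~2|')
for i, node in enumerate(arch):
    print('node {:} is the sum of {:} inputs: {:}'.format(i + 1, len(node), node))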
rtachi-lab/Human-Cochlear-Model | [
"6584de225176d8d1b2be96939acb7ef7d3f64774"
] | [
"CochlearModel_2D_Direct.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport tqdm\nimport wavfile\n\nclass CochlearModel:\n \"\"\"\n Two-dimensional cochlear model with two-degree-of-freedom\n (2DOF) micro-structure [1] for human. This program employs \n time domain solution proposed in Ref. [2], and for fast calcuration,\n applies non-unifomrom grid setting [3].\n\n Ref.\n [1] Neely S and Kim D, \"A model for active elements in cochlear biomechanics,\"\n The Journal of the Acoustical Society of America, 79(5), 1472--1480, 1986.\n [2] Diependaal, R.J et al, \"Numerical methods for solving one-dimensional\n cochlear models in the time domain, \" The Journal of the Acoustical Society of \n America, 82 (5), 1655--1666, 1987\n [3] Murakami, Y \"Efficiency limit of nonuniform grid setting in two-dimensional\n cochlear model\" Acoustical Science and Technology, 40 (5), 336--343, 2019. \n \n Attributes\n ----------\n Nx : int\n Number of segments for x-axis\n Ny : int\n Number of segments for y-axis\n Lb : float\n Cochlear length [cm]\n W : float\n Witdh of basilar membrane (BM) [cm]\n H : float\n Height of BM [cm]\n b : float\n ratio of BM to CP displacement\n rho : float\n Fluid density [dyn cm^-3]\n dx : float\n Spacing between two segments for x-axis [cm]\n dy : float\n Spacing between two segments for y-axis [cm]\n x : ndarray\n Longitudial poisition from the stapes [cm]\n y : ndarray\n Poisition from the BM [cm]\n k1 : ndarray\n Compliance of BM [dyn cm^-3]\n m1 : ndarray\n Mass of BM [g cm^-2]\n c1 : ndarray \n Resistance of BM [dyn s cm^-3]\n k2 : ndarray\n Compliance of tectrial membrane (TM) [dyn cm^-3]\n m2 : ndarray\n Mass of TM [g cm^-2]\n c2 : ndarray\n Resistance of TM [dyn s cm^-3]\n k3 : ndarray\n Compliance of connection between BM and TM [dyn cm^-3]\n c3 : ndarray\n Resistance of connection between BM and TM [dyn s cm^-3]\n k4 : ndarray\n Compliance of outer hair cell's (OHC's) activity [dyn cm^-3]\n c4 : ndarray\n Resistance of outer hair cell's (OHC's) activity [dyn s cm^-3]\n gamma : ndarray\n Gain factor distribution \n dt : float\n Time step for time domain simulation [sec]\n beta : float\n Complete saturating point in OHC's active process [cm]\n \"\"\"\n def __init__(self, Nx, Ny, gamma):\n \"\"\"\n Parameters\n ----------\n Nx : int\n Number of segment for x-axis\n Ny : int\n Number of segment for y-axis\n gamma : ndarray\n Gain factor distribution\n \"\"\"\n self.Nx = Nx\n self.Ny = Ny\n self.Lb = 3.5\n self.L = 0.1\n self.W = 0.1\n self.H = 0.1\n self.b = 0.4\n self.rho = 1.0\n self.dx = self.Lb/self.Nx\n self.x = np.arange(0,self.Lb,self.dx)\n \n By = 100\n ry = 100\n m = np.linspace(0,1,Ny)\n Bdy = np.exp(By)\n Ay = (ry-1)*By/((ry-1)*(Bdy-1)+By*(Bdy-1))\n Cy = 1-Ay/By*Bdy+Ay/By\n Dy = -Ay/By\n self.y = (Ay/By*np.exp(By*m) + Cy*m + Dy)*self.H\n self.dy = self.y[1:]-self.y[0:-1]\n\n ch_damp = 2.8 * np.exp(-0.2 * self.x)\n \n self.k1 = 2.2e8*np.exp(-3*self.x)\n self.m1 = 3e-3\n self.c1 = 6 + 670*np.exp(-1.5*self.x) * ch_damp\n self.k2 = 1.4e6*np.exp(-3.3*self.x)\n self.c2 = 4.4*np.exp(-1.65*self.x) * ch_damp\n self.m2 = 0.5e-3\n self.k3 = 2.0e6*np.exp(-3*self.x)\n self.c3 = 0.8*np.exp(-0.6*self.x) * ch_damp\n self.k4 = 1.15e8*np.exp(-3*self.x)\n self.c4 = 440.0*np.exp(-1.5*self.x) * ch_damp\n\n self.c1c3 = self.c1 + self.c3\n self.k1k3 = self.k1 + self.k3\n self.c2c3 = self.c2 + self.c3\n self.k2k3 = self.k2 + self.k3\n\n self.gamma = gamma\n\n self.dt = 10e-6\n\n self.beta = 50e-7\n\n def Gohc(self, uc, beta):\n return beta*np.tanh(uc/beta)\n\n def dGohc(self, uc, vc, beta):\n return 
vc/np.cosh(uc)**2\n\n def get_g(self, vb, ub, vt, ut):\n\n gb = self.c1c3*vb + self.k1k3*ub - self.c3*vt - self.k3*ut\n gt = - self.c3*vb - self.k3*ub + self.c2c3*vt + self.k2k3*ut\n\n uc_lin = ub - ut\n vc_lin = vb - vt\n\n uc = self.Gohc(uc_lin, self.beta)\n vc = self.dGohc(uc_lin, vc_lin, self.beta)\n\n gb -= self.gamma * ( self.c4*vc + self.k4*uc )\n\n return gb, gt\n\n def solve_time_domain(self, f):\n \"\"\"\n Solve the cochlear model in time domain\n\n Parameters\n ----------\n f : ndarray\n Input signal [cm s^-2]\n\n Returns:\n --------\n vb : ndarray\n Basilar membrane (BM) velocity [cm s^-1]\n ub : ndarray\n Basilar membrane (BM) displacement [cm]\n p : ndarray\n Pressure difference between two chambers [barye]\n (1 [barye]= 0.1 [Pa])\n \"\"\"\n Ntime = int(round(f.size/2))\n T = Ntime * self.dt\n\n t2 = np.arange(0,T,self.dt/2)\n t = np.arange(0,T,self.dt)\n\n alpha2 = 4*self.rho*self.b/self.dy/self.m1\n\n vb = np.zeros((Ntime,Nx))\n ub = np.zeros((Ntime,Nx))\n vt = np.zeros((Ntime,Nx))\n ut = np.zeros((Ntime,Nx))\n\n p = np.zeros((Ntime,Nx))\n\n Ay = np.zeros((Ny,Ny))\n\n Ay[0,0] = -2/self.dy[0]**2 - alpha2[0]\n Ay[0,1] = 2/self.dy[0]**2\n\n aym = np.zeros(Ny)\n bym = np.zeros(Ny)\n aym[1:-1] = self.dy[1:]/self.dy[:-1]\n bym[1:-1] = 2/self.dy[1:]/self.dy[:-1]/(1+aym[1:-1])\n\n for m in range(1,Ny-1):\n Ay[m,m-1] = bym[m]*aym[m]\n Ay[m,m] = -bym[m]*(1+aym[m])\n Ay[m,m+1] = bym[m]\n\n Ay[Ny-1,Ny-2] = 2/self.dy[-1]**2\n Ay[Ny-1,Ny-1] = -2/self.dy[-1]**2\n\n Iy = np.eye(Ny)\n F = np.zeros((Nx*Ny,Nx*Ny))\n F[:Ny,:Ny] = -2*Iy + Ay*self.dx**2\n F[:Ny,Ny:Ny*2] = 2*Iy\n for mm in range(1,Nx-1):\n F[mm*Ny:(mm+1)*Ny,(mm-1)*Ny:mm*Ny] = Iy \n F[mm*Ny:(mm+1)*Ny,mm*Ny:(mm+1)*Ny] = -2*Iy + Ay*self.dx**2 \n F[mm*Ny:(mm+1)*Ny,(mm+1)*Ny:(mm+2)*Ny] = Iy \n F[(Nx-1)*Ny:,(Nx-2)*Ny:(Nx-1)*Ny] = Iy\n F[(Nx-1)*Ny:,(Nx-1)*Ny:] = -2*Iy + Ay*self.dx**2\n F /= self.dx**2\n\n iF = np.linalg.inv(F)\n\n for ii in tqdm.tqdm(range(Ntime-1)):\n ######### RK4 ##################\n\n # (ii)\n gb, gt = self.get_g(vb[ii], ub[ii], vt[ii], ut[ii])\n\n k = np.zeros(Nx*Ny)\n k[::Ny] -= alpha2[0]*gb\n k[:Ny] -= f[ii*2] * 2/self.dx\n \n #(iii)\n p[ii] = np.dot(iF, k)[::Ny]\n\n #(iv)-(v)\n dvb1 = (p[ii]-gb)/self.m1 \n ub1 = ub[ii] + 0.5*self.dt*vb[ii]\n vb1 = vb[ii] + 0.5*self.dt*dvb1\n\n dvt1 = -gt/self.m2\n ut1 = ut[ii] + 0.5*self.dt*vt[ii]\n vt1 = vt[ii] + 0.5*self.dt*dvt1 \n \n # (ii)\n gb, gt = self.get_g(vb1, ub1, vt1, ut1) \n\n k = np.zeros(Nx*Ny)\n k[::Ny] -= alpha2[0]*gb\n k[:Ny] -= f[ii*2+1] * 2/self.dx\n\n #(iii)\n p1 = np.dot(iF, k)[::Ny]\n\n #(iv)-(v)\n dvb2 = (p1-gb)/self.m1\n ub2 = ub[ii] + 0.5*self.dt*vb1\n vb2 = vb[ii] + 0.5*self.dt*dvb2\n\n dvt2 = -gt/self.m2\n ut2 = ut[ii] + 0.5*self.dt*vt1\n vt2 = vt[ii] + 0.5*self.dt*dvt2 \n\n # (ii)\n gb, gt = self.get_g(vb2, ub2, vt2, ut2)\n\n k = np.zeros(Nx*Ny)\n k[::Ny] -= alpha2[0]*gb\n k[:Ny] -= f[ii*2+1] * 2/self.dx\n\n #(iii)\n p2 = np.dot(iF, k)[::Ny]\n\n #(iv)-(v)\n dvb3 = (p2-gb)/self.m1\n ub3 = ub[ii] + self.dt*vb2 \n vb3 = vb[ii] + self.dt*dvb3\n\n dvt3 = -gt/self.m2\n ut3 = ut[ii] + self.dt*vt2\n vt3 = vt[ii] + self.dt*dvt3 \n\n # (ii)\n gb, gt = self.get_g(vb3, ub3, vt3, ut3)\n \n k = np.zeros(Nx*Ny)\n k[::Ny] -= alpha2[0]*gb\n k[:Ny] -= f[ii*2+2] * 2/self.dx\n\n #(iii)\n p3 = np.dot(iF, k)[::Ny]\n\n #(iv)-(v)\n dvb4 = (p3-gb)/self.m1\n\n dvt4 = -gt/self.m2 \n\n ub[ii+1] = ub[ii] + self.dt/6*(vb[ii] + 2*vb1 + 2*vb2 + vb3)\n vb[ii+1] = vb[ii] + self.dt/6*(dvb1 + 2*dvb2 + 2*dvb3 + dvb4) \n ut[ii+1] = ut[ii] + self.dt/6*(vt[ii] + 2*vt1 + 2*vt2 + vt3)\n 
vt[ii+1] = vt[ii] + self.dt/6*(dvt1 + 2*dvt2 + 2*dvt3 + dvt4)\n\n return vb, ub, p\n\n\"\"\"\nA demonstration plots envelopes of basilar membrane (BM) velocity\nfor 0.25, 1 and 4 kHz tones varied 0 to 100 dB with 20 dB step.\n\"\"\" \nif __name__ == \"__main__\":\n Nx = 300\n Ny = 4\n g = 0.8\n\n gamma = np.ones(Nx)*g\n\n cm = CochlearModel(Nx, Ny, gamma) # Initial setup\n\n Lps = np.arange(0,120,20)\n\n for fp in [250, 1000, 4000]:\n filename = '%gHz.wav'%(fp)\n plt.figure()\n for Lp in Lps:\n print(\"%dHz %ddB\"%(fp, Lp))\n sinewave = wavfile.load(filename, Lp) # Loading input signal\n\n vb, ub, p = cm.solve_time_domain( sinewave ) # Solve\n\n plt.plot(cm.x*10, 20*np.log10(np.max(np.abs(vb[int(round(vb.shape[0]*9/10)):]), axis=0)*10))\n plt.xlabel('Distance from the stapes [mm]')\n plt.ylabel('BM velocity [dB re 1 mm/s]')\n plt.title('%d Hz'%(fp))\n plt.show()"
] | [
[
"numpy.dot",
"numpy.cosh",
"numpy.linspace",
"matplotlib.pyplot.title",
"numpy.linalg.inv",
"numpy.arange",
"numpy.eye",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.tanh",
"numpy.exp",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
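As a side note on the nonuniform grid used in the cochlear model above (Ref. [3] in its docstring), the y-direction spacing can be inspected in isolation. This is a minimal sketch that restates the grid construction from CochlearModel.__init__ with the constants used in the demo; it introduces nothing new, it only re-runs the same formulas:

import numpy as np

# Nonuniform grid in the y direction, restated from CochlearModel.__init__:
# grid points cluster near the basilar membrane (y = 0).
Ny, H = 4, 0.1        # number of segments and chamber height [cm], as in the demo
By, ry = 100, 100     # stretching parameters from the constructor
m = np.linspace(0, 1, Ny)
Bdy = np.exp(By)
Ay = (ry - 1) * By / ((ry - 1) * (Bdy - 1) + By * (Bdy - 1))
Cy = 1 - Ay / By * Bdy + Ay / By
Dy = -Ay / By
y = (Ay / By * np.exp(By * m) + Cy * m + Dy) * H
print(y)             # runs from 0 at the BM to H at the opposite wall
print(np.diff(y))    # spacings dy grow away from the BM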
ivanwilliammd/32images_hdf5converter | [
"2956c163b790d1fc1c3248e46d17894dde52eeb9"
] | [
"Upsampled_4x/HDF5_converter_0.5.py"
] | [
"import glob\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport h5py\r\nimport IPython \r\nimport pandas as pd\r\nimport csv \r\n\r\ndf = pd.read_csv('lung_annotation_raw_Final.csv')\r\ndf = df[['ACC','TIPE','Xmin','Ymin','Xmax','Ymax','Zt_minsplitnum','Zt_minsplit_rev','Zt_maxsplitnum','Zt_maxsplit_rev','box_size']]\r\ndf\r\n\r\ncsv_file=open('lung_nodule_annotation_0.5.csv', mode='w+')\r\n\r\npath = '/home/ivanwilliam/Documents/Full_images/0.5/'\r\nall_dirs = os.listdir(path)\r\ndir_it=0\r\n\r\nheight = 2048\r\nwidth = 2048\r\nratio = height/512\r\n\r\nfor dir_it in range(len(all_dirs)):\r\n\tfile_path = '/home/ivanwilliam/Documents/Full_images/0.5/'+str(all_dirs[dir_it])\r\n\t# import IPython; IPython.embed()\r\n\r\n\tfor root, dirs, files in os.walk(file_path):\r\n\t\tprint('\\n\\tFound directory: %s' % root)\r\n\r\n\t\t# for subdir in dirs:\r\n\t\t# \tprint('SUBFOLDER OF ' + str(root) + ': ' + str(subdir))\r\n\t\t# \tnamedir = str(subdir)\r\n\t\t\r\n\t\tfileName = sorted(files, key=str)\r\n\t\tN_file = len(fileName)\r\n\r\n\t\ti = 1\r\n\t\tk = 0\r\n\t\t\r\n\t\tif i in range(N_file):\t\r\n\t\t\thdf32_list=[]\r\n\t\t\tfor fileName in sorted (files, key=str):\r\n\t\t\t\tif N_file-k*64>=64:\r\n\t\t\t\t\tprint('Opening %d out of %d image at directory: %s/%s' % (i, N_file, root, fileName))\r\n\t\t\t\t\tpicture_ds = cv2.imread('%s/%s' % (root, fileName))\r\n\t\t\t\t\tresize_picture_ds = cv2.resize(picture_ds, dsize=(height, width), interpolation=cv2.INTER_CUBIC)\r\n\t\t\t\t\tpics_array= np.array(resize_picture_ds) \r\n\r\n\t\t\t\t\tif i%64==1:\r\n\t\t\t\t\t\tpics32_list=[]\r\n\t\t\t\t\t\tpics32_list.append(pics_array)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\tif i%64==0:\r\n\t\t\t\t\t\tpics32_list.append(pics_array)\r\n\t\t\t\t\t\tpics32_array=np.stack((pics32_list[0], pics32_list[2], pics32_list[4], pics32_list[6], pics32_list[8], \r\n\t\t\t\t\t\t\tpics32_list[10], pics32_list[12], pics32_list[14], pics32_list[16], pics32_list[18],\r\n\t\t\t\t\t\t\tpics32_list[20], pics32_list[22], pics32_list[24], pics32_list[26], pics32_list[28], \r\n\t\t\t\t\t\t\tpics32_list[30], pics32_list[32], pics32_list[34], pics32_list[36], pics32_list[38],\r\n\t\t\t\t\t\t\tpics32_list[40], pics32_list[42], pics32_list[44], pics32_list[46], pics32_list[48], \r\n\t\t\t\t\t\t\tpics32_list[50], pics32_list[52], pics32_list[54], pics32_list[56], pics32_list[58],\r\n\t\t\t\t\t\t\tpics32_list[60], pics32_list[62]), axis=0)\r\n\t\t\t\t\t\tprint('\\n Compiling 32 images into HDF list \\n')\r\n\t\t\t\t\t\thdf32_list.append(pics32_array)\r\n\t\t\t\t\t\tk = k+1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tpics32_list.append(pics_array)\r\n\t\t\t\t\ti=i+1\r\n\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\tif i==k*64+1:\r\n\t\t\t\t\t\tprint('Opening %d out of %d image at directory: %s/%s' % (i, N_file, root, fileName))\r\n\t\t\t\t\t\tpicture_ds = cv2.imread('%s/%s' % (root, fileName))\r\n\t\t\t\t\t\tresize_picture_ds = cv2.resize(picture_ds, dsize=(height, width), interpolation=cv2.INTER_CUBIC)\r\n\t\t\t\t\t\tpics_array= np.array(resize_picture_ds)\r\n\t\t\t\t\t\tr = N_file - k*64\r\n\r\n\t\t\t\t\t\tprint('\\n\\tThere are less than 64 file remaining, using last 64 images as LAST BATCH of HDF5 from %d till %d' % (i, N_file))\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tx = 64-r\r\n\t\t\t\t\t\tpics32_list = pics32_list[62-x:62]\r\n\t\t\t\t\t\tprint('\\t...............Start with '+str(len(pics32_list)) +' data(s) from previous batch...............')\r\n\t\t\t\t\t\tpics32_list.append(pics_array)\r\n\t\t\t\t\t\ti=i+1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tprint('Opening %d 
out of %d image at directory: %s/%s' % (i, N_file, root, fileName))\r\n\t\t\t\t\t\tpicture_ds = cv2.imread('%s/%s' % (root, fileName))\r\n\t\t\t\t\t\tresize_picture_ds = cv2.resize(picture_ds, dsize=(height, width), interpolation=cv2.INTER_CUBIC)\r\n\t\t\t\t\t\tpics_array= np.array(resize_picture_ds)\r\n\r\n\t\t\t\t\t\tif i==N_file:\r\n\t\t\t\t\t\t\tpics32_list.append(pics_array)\r\n\t\t\t\t\t\t\tpics32_array=np.stack((pics32_list[0], pics32_list[2], pics32_list[4], pics32_list[6], pics32_list[8], \r\n\t\t\t\t\t\t\t\tpics32_list[10], pics32_list[12], pics32_list[14], pics32_list[16], pics32_list[18],\r\n\t\t\t\t\t\t\t\tpics32_list[20], pics32_list[22], pics32_list[24], pics32_list[26], pics32_list[28], \r\n\t\t\t\t\t\t\t\tpics32_list[30], pics32_list[32], pics32_list[34], pics32_list[36], pics32_list[38],\r\n\t\t\t\t\t\t\t\tpics32_list[40], pics32_list[42], pics32_list[44], pics32_list[46], pics32_list[48], \r\n\t\t\t\t\t\t\t\tpics32_list[50], pics32_list[52], pics32_list[54], pics32_list[56], pics32_list[58],\r\n\t\t\t\t\t\t\t\tpics32_list[60], pics32_list[62]), axis=0)\r\n\t\t\t\t\t\t\tprint('\\n Compiling LAST 32 images into 1 HDF list\\n')\r\n\t\t\t\t\t\t\thdf32_list.append(pics32_array)\r\n\t\t\t\t\t\t\tk=k+1\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tpics32_list.append(pics_array)\r\n\t\t\t\t\t\ti=i+1\r\n\t\t\t\r\n\r\n\r\n\r\n######################################################################################\t\t\t\r\n\t\t\tif fileName[0:4]=='AGFA':\r\n\t\t\t\tsearch_str=fileName[0:16]\r\n\t\t\t\tdfAcc=df[df['ACC'].str.match(search_str)]\r\n\t\t\t\ttotal_dfAcc=dfAcc.shape[0]\r\n\t\t\t\tprint('Using AGFA as search keyword')\r\n\t\t\tif fileName[0:4]=='KDC6':\r\n\t\t\t\tsearch_str=fileName[0:10]\r\n\t\t\t\tdfAcc=df[df['ACC'].str.match(search_str)]\r\n\t\t\t\ttotal_dfAcc=dfAcc.shape[0]\r\n\t\t\t\tprint('Using KDC6 as search keyword')\r\n\t\t\telse:\r\n\t\t\t\tprint('Continue......................')\r\n\t\t\t\r\n\t\t\t# maxstopper = total_dfAcc - 1\r\n\t\t\t# import IPython;IPython.embed()\r\n\r\n\r\n\t\t\th=0\r\n\t\t\tp=0\t\t\r\n\t\t\tfor h in range(total_dfAcc):\r\n\t\t\t\ttipe = dfAcc.iloc[[h]].TIPE.values[0]\r\n\t\t\t\tx1 = dfAcc.iloc[[h]].Xmin.values[0]\r\n\t\t\t\ty1 = dfAcc.iloc[[h]].Ymin.values[0]\r\n\t\t\t\tx2 = dfAcc.iloc[[h]].Xmax.values[0]\r\n\t\t\t\ty2 = dfAcc.iloc[[h]].Ymax.values[0]\r\n\t\t\t\tz1_slice = dfAcc.iloc[[h]].Zt_minsplitnum.values[0]\r\n\t\t\t\tz1 = dfAcc.iloc[[h]].Zt_minsplit_rev.values[0]\r\n\t\t\t\tz2_slice = dfAcc.iloc[[h]].Zt_maxsplitnum.values[0]\r\n\t\t\t\tz2 = dfAcc.iloc[[h]].Zt_maxsplit_rev.values[0]\r\n\t\t\t\tbox_size = dfAcc.iloc[[h]].box_size.values[0]\r\n\r\n\t\t\t\tz1_order = str('%0*d' % (3, z1_slice))\r\n\t\t\t\tz2_order = str('%0*d' % (3, z2_slice))\r\n\r\n\t\t\t\tx1 = x1*ratio\r\n\t\t\t\ty1 = y1*ratio\r\n\t\t\t\tx2 = x2*ratio\r\n\t\t\t\ty2 = y2*ratio\r\n\t\t\t\tbox_size=box_size*ratio\r\n\t\t\t\t\r\n\t\t\t\t\r\n\r\n\t\t\t\t# import IPython;IPython.embed()\r\n \t\t\t\t\r\n\t\t\t\tif (x1==0 and x2==0 and y1 ==0 and y2==0) or box_size<32:\r\n\t\t\t\t\tprint ('\\tNo annotation or NODULE too small (<32pix) = '+str(all_dirs[dir_it])+'.......')\r\n\t\t\t\t\tannot_file_path = 'HDF5_File/'+str(str(all_dirs[dir_it])+'_'+str(z1_order))\r\n\t\t\t\t\t# csv_file.write(str(annot_file_path)+','','','','','','',''\\n')\r\n\t\t\t\t\thdf5_name=str(str(all_dirs[dir_it])+'_'+str(z1_order))\r\n\t\t\t\t\tprint(hdf5_name+\" isn't made\")\r\n\t\t\t\t\th = h+1\r\n\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\tif z1_slice==z2_slice:\r\n\t\t\t\t\t\t## Printing 
annotation for z1 & z2\r\n\t\t\t\t\t\tprint ('\\tACC and z1, z2 order match found = '+str(search_str)+'.......')\r\n\t\t\t\t\t\tannot_file_path = 'HDF5_File/'+str(str(all_dirs[dir_it])+'_'+str(z1_order))\r\n\t\t\t\t\t\tcsv_file.write(str(annot_file_path)+','+str(x1)+','+str(y1)+','+str(x2)+','+str(y2)+','+str(z1)+','+str(z2)+','+str(tipe)+'\\n')\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t## Write hdf5 file for z1 & z2\r\n\r\n\t\t\t\t\t\thdf5_name=str(str(all_dirs[dir_it])+'_'+str(z1_order))\r\n\t\t\t\t\t\thdf5_path = '/media/ivanwilliam/Ivan_HDD_2TB/i3d_hdf5_lung_data_resized_4x/HDF5_file/'+str(hdf5_name)+'.h5'\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\texists = os.path.isfile(hdf5_path)\r\n\t\t\t\t\t\tif exists:\r\n\t\t\t\t\t\t\tprint(hdf5_name+\" already exists. \\n Continue to next hdf5 files .................\")\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\thdf_file = h5py.File(hdf5_path, 'w')\r\n\t\t\t\t\t\t\tmatrix123 = hdf32_list[z1_slice-1]\r\n\t\t\t\t\t\t\thdf_file.create_dataset(name='dataset', data=matrix123)\r\n\t\t\t\t\t\t\thdf_check=h5py.File(hdf5_path, 'r')\r\n\t\t\t\t\t\t\tbase_items = list (hdf_check.items())\r\n\t\t\t\t\t\t\tprint (\"HDF5_file at \"+str(hdf5_path)+\" which contain: \"+str(base_items)+\" successfully created\")\r\n\t\t\t\t\t\t\thdf_file.close()\r\n\t\t\t\t\r\n\t\t\t\t\t\tp = p+1\r\n\t\t\t\t\t\th = h+1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tif z2_slice>z1_slice:\r\n\t\t\t\t\t\t\tprint ('\\tz2 and z1 slice position differ, splitting it into 2 file and annotation..........')\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t## Printing annotation for z1\r\n\t\t\t\t\t\t\tprint ('\\tACC and z1 match found = '+str(search_str)+'.......')\r\n\t\t\t\t\t\t\tannot_file_path = 'HDF5_File/'+str(str(all_dirs[dir_it])+'_'+str(z1_order))\r\n\t\t\t\t\t\t\tcsv_file.write(str(annot_file_path)+','+str(x1)+','+str(y1)+','+str(x2)+','+str(y2)+','+str(z1)+','+'32'+','+str(tipe)+'\\n')\r\n\r\n\t\t\t\t\t\t\t## Printing annotation for z2\r\n\t\t\t\t\t\t\tprint ('\\tACC and z2 match found = '+str(search_str)+'.......')\r\n\t\t\t\t\t\t\tannot_file_path = 'HDF5_File/'+str(str(all_dirs[dir_it])+'_'+str(z2_order))\r\n\t\t\t\t\t\t\tcsv_file.write(str(annot_file_path)+','+str(x1)+','+str(y1)+','+str(x2)+','+str(y2)+','+'1'+','+str(z2)+','+str(tipe)+'\\n')\r\n\r\n\r\n\t\t\t\t\t\t\t## Write hdf5 file for z1\r\n\t\t\t\t\t\t\thdf5_name=str(str(all_dirs[dir_it])+'_'+str(z1_order))\r\n\t\t\t\t\t\t\thdf5_path = '/media/ivanwilliam/Ivan_HDD_2TB/i3d_hdf5_lung_data_resized_4x/HDF5_file/'+str(hdf5_name)+'.h5'\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\texists = os.path.isfile(hdf5_path)\r\n\t\t\t\t\t\t\tif exists:\r\n\t\t\t\t\t\t\t\tprint(hdf5_name+\" already exists. 
\\n Continue to next hdf5 files .................\")\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\thdf_file = h5py.File(hdf5_path, 'w')\r\n\t\t\t\t\t\t\t\tmatrix123 = hdf32_list[z1_slice-1]\r\n\t\t\t\t\t\t\t\thdf_file.create_dataset(name='dataset', data=matrix123)\r\n\t\t\t\t\t\t\t\thdf_check=h5py.File(hdf5_path, 'r')\r\n\t\t\t\t\t\t\t\tbase_items = list (hdf_check.items())\r\n\t\t\t\t\t\t\t\tprint (\"HDF5_file at \"+str(hdf5_path)+\" which contain: \"+str(base_items)+\" successfully created\")\r\n\t\t\t\t\t\t\t\thdf_file.close()\r\n\r\n\t\t\t\t\t\t\t## Write hdf5 file for z2\r\n\t\t\t\t\t\t\thdf5_name=str(str(all_dirs[dir_it])+'_'+str(z2_order))\r\n\t\t\t\t\t\t\thdf5_path = '/media/ivanwilliam/Ivan_HDD_2TB/i3d_hdf5_lung_data_resized_4x/HDF5_file/'+str(hdf5_name)+'.h5'\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\texists = os.path.isfile(hdf5_path)\r\n\t\t\t\t\t\t\tif exists:\r\n\t\t\t\t\t\t\t\tprint(hdf5_name+\" already exists. \\n Continue to next hdf5 files .................\")\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\thdf_file = h5py.File(hdf5_path, 'w')\r\n\t\t\t\t\t\t\t\tmatrix123 = hdf32_list[z2_slice-1]\r\n\t\t\t\t\t\t\t\thdf_file.create_dataset(name='dataset', data=matrix123)\r\n\t\t\t\t\t\t\t\thdf_check=h5py.File(hdf5_path, 'r')\r\n\t\t\t\t\t\t\t\tbase_items = list (hdf_check.items())\r\n\t\t\t\t\t\t\t\tprint (\"HDF5_file at \"+str(hdf5_path)+\" which contain: \"+str(base_items)+\" successfully created\")\r\n\t\t\t\t\t\t\t\thdf_file.close()\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tp = p+1\r\n\t\t\t\t\t\t\th = h+1\r\n\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\th = h+1\r\n\t\t\tprint('\\tThere are %d new annotations added' %(p))\r\n\t\t\r\n\t\t\r\n\t\tk = 0\r\n\t\th = 0\r\n\t\ti = 1\r\n\t\thdf32_list=[]\r\n\t\tpics32_list=[]\r\n\t\tpics32_array=[]\r\n\t\tdir_it=dir_it + 1"
] | [
[
"numpy.array",
"pandas.read_csv",
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
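The slice-grouping logic in the converter above (keep every other image of each 64-slice window to form a 32-deep volume, resize 4x to 2048x2048, store it under the dataset name 'dataset') can be expressed more compactly. A minimal sketch, with hypothetical paths and assuming OpenCV-readable slice images:

import cv2
import h5py
import numpy as np

def write_volume(image_paths, out_path, size=2048):
    # Keep indices 0, 2, ..., 62 of a 64-image window, mirroring the
    # pics32_list[0], pics32_list[2], ... selection in the script above.
    slices = []
    for p in image_paths[:64:2]:
        img = cv2.imread(p)
        slices.append(cv2.resize(img, dsize=(size, size),
                                 interpolation=cv2.INTER_CUBIC))
    volume = np.stack(slices, axis=0)   # shape: (32, size, size, 3)
    with h5py.File(out_path, 'w') as f:
        f.create_dataset(name='dataset', data=volume)

# write_volume(sorted(paths)[:64], 'example.h5')  # 'paths' and the output name are hypothetical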
theendsofinvention/cartoonify | [
"39ea84d96b3e93f0480e6d6158bea506d01278ca",
"39ea84d96b3e93f0480e6d6158bea506d01278ca",
"39ea84d96b3e93f0480e6d6158bea506d01278ca"
] | [
"cartoonify/app/object_detection/core/box_predictor.py",
"cartoonify/app/object_detection/core/region_similarity_calculator.py",
"cartoonify/app/object_detection/core/keypoint_ops_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Box predictor for object detectors.\n\nBox predictors are classes that take a high level\nimage feature map as input and produce two predictions,\n(1) a tensor encoding box locations, and\n(2) a tensor encoding classes for each box.\n\nThese components are passed directly to loss functions\nin our detection models.\n\nThese modules are separated from the main model since the same\nfew box predictor architectures are shared across many models.\n\"\"\"\nfrom abc import abstractmethod\nimport tensorflow as tf\nfrom app.object_detection.utils import ops\nfrom app.object_detection.utils import shape_utils\nfrom app.object_detection.utils import static_shape\n\nslim = tf.contrib.slim\n\nBOX_ENCODINGS = 'box_encodings'\nCLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'\nMASK_PREDICTIONS = 'mask_predictions'\n\n\nclass BoxPredictor(object):\n \"\"\"BoxPredictor.\"\"\"\n\n def __init__(self, is_training, num_classes):\n \"\"\"Constructor.\n\n Args:\n is_training: Indicates whether the BoxPredictor is in training mode.\n num_classes: number of classes. Note that num_classes *does not*\n include the background category, so if groundtruth labels take values\n in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the\n assigned classification targets can range from {0,... 
K}).\n \"\"\"\n self._is_training = is_training\n self._num_classes = num_classes\n\n @property\n def num_classes(self):\n return self._num_classes\n\n def predict(self, image_features, num_predictions_per_location, scope,\n **params):\n \"\"\"Computes encoded object locations and corresponding confidences.\n\n Takes a high level image feature map as input and produce two predictions,\n (1) a tensor encoding box locations, and\n (2) a tensor encoding class scores for each corresponding box.\n In this interface, we only assume that two tensors are returned as output\n and do not assume anything about their shapes.\n\n Args:\n image_features: A float tensor of shape [batch_size, height, width,\n channels] containing features for a batch of images.\n num_predictions_per_location: an integer representing the number of box\n predictions to be made per spatial location in the feature map.\n scope: Variable and Op scope name.\n **params: Additional keyword arguments for specific implementations of\n BoxPredictor.\n\n Returns:\n A dictionary containing at least the following tensors.\n box_encodings: A float tensor of shape\n [batch_size, num_anchors, q, code_size] representing the location of\n the objects, where q is 1 or the number of classes.\n class_predictions_with_background: A float tensor of shape\n [batch_size, num_anchors, num_classes + 1] representing the class\n predictions for the proposals.\n \"\"\"\n with tf.variable_scope(scope):\n return self._predict(image_features, num_predictions_per_location,\n **params)\n\n # TODO: num_predictions_per_location could be moved to constructor.\n # This is currently only used by ConvolutionalBoxPredictor.\n @abstractmethod\n def _predict(self, image_features, num_predictions_per_location, **params):\n \"\"\"Implementations must override this method.\n\n Args:\n image_features: A float tensor of shape [batch_size, height, width,\n channels] containing features for a batch of images.\n num_predictions_per_location: an integer representing the number of box\n predictions to be made per spatial location in the feature map.\n **params: Additional keyword arguments for specific implementations of\n BoxPredictor.\n\n Returns:\n A dictionary containing at least the following tensors.\n box_encodings: A float tensor of shape\n [batch_size, num_anchors, q, code_size] representing the location of\n the objects, where q is 1 or the number of classes.\n class_predictions_with_background: A float tensor of shape\n [batch_size, num_anchors, num_classes + 1] representing the class\n predictions for the proposals.\n \"\"\"\n pass\n\n\nclass RfcnBoxPredictor(BoxPredictor):\n \"\"\"RFCN Box Predictor.\n\n Applies a position sensitve ROI pooling on position sensitive feature maps to\n predict classes and refined locations. See https://arxiv.org/abs/1605.06409\n for details.\n\n This is used for the second stage of the RFCN meta architecture. Notice that\n locations are *not* shared across classes, thus for each anchor, a separate\n prediction is made for each class.\n \"\"\"\n\n def __init__(self,\n is_training,\n num_classes,\n conv_hyperparams,\n num_spatial_bins,\n depth,\n crop_size,\n box_code_size):\n \"\"\"Constructor.\n\n Args:\n is_training: Indicates whether the BoxPredictor is in training mode.\n num_classes: number of classes. Note that num_classes *does not*\n include the background category, so if groundtruth labels take values\n in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the\n assigned classification targets can range from {0,... 
K}).\n conv_hyperparams: Slim arg_scope with hyperparameters for conolutional\n layers.\n num_spatial_bins: A list of two integers `[spatial_bins_y,\n spatial_bins_x]`.\n depth: Target depth to reduce the input feature maps to.\n crop_size: A list of two integers `[crop_height, crop_width]`.\n box_code_size: Size of encoding for each box.\n \"\"\"\n super(RfcnBoxPredictor, self).__init__(is_training, num_classes)\n self._conv_hyperparams = conv_hyperparams\n self._num_spatial_bins = num_spatial_bins\n self._depth = depth\n self._crop_size = crop_size\n self._box_code_size = box_code_size\n\n @property\n def num_classes(self):\n return self._num_classes\n\n def _predict(self, image_features, num_predictions_per_location,\n proposal_boxes):\n \"\"\"Computes encoded object locations and corresponding confidences.\n\n Args:\n image_features: A float tensor of shape [batch_size, height, width,\n channels] containing features for a batch of images.\n num_predictions_per_location: an integer representing the number of box\n predictions to be made per spatial location in the feature map.\n Currently, this must be set to 1, or an error will be raised.\n proposal_boxes: A float tensor of shape [batch_size, num_proposals,\n box_code_size].\n\n Returns:\n box_encodings: A float tensor of shape\n [batch_size, 1, num_classes, code_size] representing the\n location of the objects.\n class_predictions_with_background: A float tensor of shape\n [batch_size, 1, num_classes + 1] representing the class\n predictions for the proposals.\n Raises:\n ValueError: if num_predictions_per_location is not 1.\n \"\"\"\n if num_predictions_per_location != 1:\n raise ValueError('Currently RfcnBoxPredictor only supports '\n 'predicting a single box per class per location.')\n\n batch_size = tf.shape(proposal_boxes)[0]\n num_boxes = tf.shape(proposal_boxes)[1]\n def get_box_indices(proposals):\n proposals_shape = proposals.get_shape().as_list()\n if any(dim is None for dim in proposals_shape):\n proposals_shape = tf.shape(proposals)\n ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)\n multiplier = tf.expand_dims(\n tf.range(start=0, limit=proposals_shape[0]), 1)\n return tf.reshape(ones_mat * multiplier, [-1])\n\n net = image_features\n with slim.arg_scope(self._conv_hyperparams):\n net = slim.conv2d(net, self._depth, [1, 1], scope='reduce_depth')\n # Location predictions.\n location_feature_map_depth = (self._num_spatial_bins[0] *\n self._num_spatial_bins[1] *\n self.num_classes *\n self._box_code_size)\n location_feature_map = slim.conv2d(net, location_feature_map_depth,\n [1, 1], activation_fn=None,\n scope='refined_locations')\n box_encodings = ops.position_sensitive_crop_regions(\n location_feature_map,\n boxes=tf.reshape(proposal_boxes, [-1, self._box_code_size]),\n box_ind=get_box_indices(proposal_boxes),\n crop_size=self._crop_size,\n num_spatial_bins=self._num_spatial_bins,\n global_pool=True)\n box_encodings = tf.squeeze(box_encodings, squeeze_dims=[1, 2])\n box_encodings = tf.reshape(box_encodings,\n [batch_size * num_boxes, 1, self.num_classes,\n self._box_code_size])\n\n # Class predictions.\n total_classes = self.num_classes + 1 # Account for background class.\n class_feature_map_depth = (self._num_spatial_bins[0] *\n self._num_spatial_bins[1] *\n total_classes)\n class_feature_map = slim.conv2d(net, class_feature_map_depth, [1, 1],\n activation_fn=None,\n scope='class_predictions')\n class_predictions_with_background = ops.position_sensitive_crop_regions(\n class_feature_map,\n 
boxes=tf.reshape(proposal_boxes, [-1, self._box_code_size]),\n box_ind=get_box_indices(proposal_boxes),\n crop_size=self._crop_size,\n num_spatial_bins=self._num_spatial_bins,\n global_pool=True)\n class_predictions_with_background = tf.squeeze(\n class_predictions_with_background, squeeze_dims=[1, 2])\n class_predictions_with_background = tf.reshape(\n class_predictions_with_background,\n [batch_size * num_boxes, 1, total_classes])\n\n return {BOX_ENCODINGS: box_encodings,\n CLASS_PREDICTIONS_WITH_BACKGROUND:\n class_predictions_with_background}\n\n\nclass MaskRCNNBoxPredictor(BoxPredictor):\n \"\"\"Mask R-CNN Box Predictor.\n\n See Mask R-CNN: He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017).\n Mask R-CNN. arXiv preprint arXiv:1703.06870.\n\n This is used for the second stage of the Mask R-CNN detector where proposals\n cropped from an image are arranged along the batch dimension of the input\n image_features tensor. Notice that locations are *not* shared across classes,\n thus for each anchor, a separate prediction is made for each class.\n\n In addition to predicting boxes and classes, optionally this class allows\n predicting masks and/or keypoints inside detection boxes.\n\n Currently this box predictor makes per-class predictions; that is, each\n anchor makes a separate box prediction for each class.\n \"\"\"\n\n def __init__(self,\n is_training,\n num_classes,\n fc_hyperparams,\n use_dropout,\n dropout_keep_prob,\n box_code_size,\n conv_hyperparams=None,\n predict_instance_masks=False,\n mask_height=14,\n mask_width=14,\n mask_prediction_conv_depth=256,\n predict_keypoints=False):\n \"\"\"Constructor.\n\n Args:\n is_training: Indicates whether the BoxPredictor is in training mode.\n num_classes: number of classes. Note that num_classes *does not*\n include the background category, so if groundtruth labels take values\n in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the\n assigned classification targets can range from {0,... K}).\n fc_hyperparams: Slim arg_scope with hyperparameters for fully\n connected ops.\n use_dropout: Option to use dropout or not. Note that a single dropout\n op is applied here prior to both box and class predictions, which stands\n in contrast to the ConvolutionalBoxPredictor below.\n dropout_keep_prob: Keep probability for dropout.\n This is only used if use_dropout is True.\n box_code_size: Size of encoding for each box.\n conv_hyperparams: Slim arg_scope with hyperparameters for convolution\n ops.\n predict_instance_masks: Whether to predict object masks inside detection\n boxes.\n mask_height: Desired output mask height. The default value is 14.\n mask_width: Desired output mask width. 
The default value is 14.\n mask_prediction_conv_depth: The depth for the first conv2d_transpose op\n applied to the image_features in the mask prediciton branch.\n predict_keypoints: Whether to predict keypoints insde detection boxes.\n\n\n Raises:\n ValueError: If predict_instance_masks or predict_keypoints is true.\n \"\"\"\n super(MaskRCNNBoxPredictor, self).__init__(is_training, num_classes)\n self._fc_hyperparams = fc_hyperparams\n self._use_dropout = use_dropout\n self._box_code_size = box_code_size\n self._dropout_keep_prob = dropout_keep_prob\n self._conv_hyperparams = conv_hyperparams\n self._predict_instance_masks = predict_instance_masks\n self._mask_height = mask_height\n self._mask_width = mask_width\n self._mask_prediction_conv_depth = mask_prediction_conv_depth\n self._predict_keypoints = predict_keypoints\n if self._predict_keypoints:\n raise ValueError('Keypoint prediction is unimplemented.')\n if ((self._predict_instance_masks or self._predict_keypoints) and\n self._conv_hyperparams is None):\n raise ValueError('`conv_hyperparams` must be provided when predicting '\n 'masks.')\n\n @property\n def num_classes(self):\n return self._num_classes\n\n def _predict(self, image_features, num_predictions_per_location):\n \"\"\"Computes encoded object locations and corresponding confidences.\n\n Flattens image_features and applies fully connected ops (with no\n non-linearity) to predict box encodings and class predictions. In this\n setting, anchors are not spatially arranged in any way and are assumed to\n have been folded into the batch dimension. Thus we output 1 for the\n anchors dimension.\n\n Also optionally predicts instance masks.\n The mask prediction head is based on the Mask RCNN paper with the following\n modifications: We replace the deconvolution layer with a bilinear resize\n and a convolution.\n\n Args:\n image_features: A float tensor of shape [batch_size, height, width,\n channels] containing features for a batch of images.\n num_predictions_per_location: an integer representing the number of box\n predictions to be made per spatial location in the feature map.\n Currently, this must be set to 1, or an error will be raised.\n\n Returns:\n A dictionary containing the following tensors.\n box_encodings: A float tensor of shape\n [batch_size, 1, num_classes, code_size] representing the\n location of the objects.\n class_predictions_with_background: A float tensor of shape\n [batch_size, 1, num_classes + 1] representing the class\n predictions for the proposals.\n If predict_masks is True the dictionary also contains:\n instance_masks: A float tensor of shape\n [batch_size, 1, num_classes, image_height, image_width]\n If predict_keypoints is True the dictionary also contains:\n keypoints: [batch_size, 1, num_keypoints, 2]\n\n Raises:\n ValueError: if num_predictions_per_location is not 1.\n \"\"\"\n if num_predictions_per_location != 1:\n raise ValueError('Currently FullyConnectedBoxPredictor only supports '\n 'predicting a single box per class per location.')\n spatial_averaged_image_features = tf.reduce_mean(image_features, [1, 2],\n keep_dims=True,\n name='AvgPool')\n flattened_image_features = slim.flatten(spatial_averaged_image_features)\n if self._use_dropout:\n flattened_image_features = slim.dropout(flattened_image_features,\n keep_prob=self._dropout_keep_prob,\n is_training=self._is_training)\n with slim.arg_scope(self._fc_hyperparams):\n box_encodings = slim.fully_connected(\n flattened_image_features,\n self._num_classes * self._box_code_size,\n 
activation_fn=None,\n scope='BoxEncodingPredictor')\n class_predictions_with_background = slim.fully_connected(\n flattened_image_features,\n self._num_classes + 1,\n activation_fn=None,\n scope='ClassPredictor')\n box_encodings = tf.reshape(\n box_encodings, [-1, 1, self._num_classes, self._box_code_size])\n class_predictions_with_background = tf.reshape(\n class_predictions_with_background, [-1, 1, self._num_classes + 1])\n\n predictions_dict = {\n BOX_ENCODINGS: box_encodings,\n CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background\n }\n\n if self._predict_instance_masks:\n with slim.arg_scope(self._conv_hyperparams):\n upsampled_features = tf.image.resize_bilinear(\n image_features,\n [self._mask_height, self._mask_width],\n align_corners=True)\n upsampled_features = slim.conv2d(\n upsampled_features,\n num_outputs=self._mask_prediction_conv_depth,\n kernel_size=[2, 2])\n mask_predictions = slim.conv2d(upsampled_features,\n num_outputs=self.num_classes,\n activation_fn=None,\n kernel_size=[3, 3])\n instance_masks = tf.expand_dims(tf.transpose(mask_predictions,\n perm=[0, 3, 1, 2]),\n axis=1,\n name='MaskPredictor')\n predictions_dict[MASK_PREDICTIONS] = instance_masks\n return predictions_dict\n\n\nclass ConvolutionalBoxPredictor(BoxPredictor):\n \"\"\"Convolutional Box Predictor.\n\n Optionally add an intermediate 1x1 convolutional layer after features and\n predict in parallel branches box_encodings and\n class_predictions_with_background.\n\n Currently this box predictor assumes that predictions are \"shared\" across\n classes --- that is each anchor makes box predictions which do not depend\n on class.\n \"\"\"\n\n def __init__(self,\n is_training,\n num_classes,\n conv_hyperparams,\n min_depth,\n max_depth,\n num_layers_before_predictor,\n use_dropout,\n dropout_keep_prob,\n kernel_size,\n box_code_size,\n apply_sigmoid_to_scores=False,\n class_prediction_bias_init=0.0):\n \"\"\"Constructor.\n\n Args:\n is_training: Indicates whether the BoxPredictor is in training mode.\n num_classes: number of classes. Note that num_classes *does not*\n include the background category, so if groundtruth labels take values\n in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the\n assigned classification targets can range from {0,... K}).\n conv_hyperparams: Slim arg_scope with hyperparameters for convolution ops.\n min_depth: Minumum feature depth prior to predicting box encodings\n and class predictions.\n max_depth: Maximum feature depth prior to predicting box encodings\n and class predictions. If max_depth is set to 0, no additional\n feature map will be inserted before location and class predictions.\n num_layers_before_predictor: Number of the additional conv layers before\n the predictor.\n use_dropout: Option to use dropout for class prediction or not.\n dropout_keep_prob: Keep probability for dropout.\n This is only used if use_dropout is True.\n kernel_size: Size of final convolution kernel. 
If the\n spatial resolution of the feature map is smaller than the kernel size,\n then the kernel size is automatically set to be\n min(feature_width, feature_height).\n box_code_size: Size of encoding for each box.\n apply_sigmoid_to_scores: if True, apply the sigmoid on the output\n class_predictions.\n class_prediction_bias_init: constant value to initialize bias of the last\n conv2d layer before class prediction.\n\n Raises:\n ValueError: if min_depth > max_depth.\n \"\"\"\n super(ConvolutionalBoxPredictor, self).__init__(is_training, num_classes)\n if min_depth > max_depth:\n raise ValueError('min_depth should be less than or equal to max_depth')\n self._conv_hyperparams = conv_hyperparams\n self._min_depth = min_depth\n self._max_depth = max_depth\n self._num_layers_before_predictor = num_layers_before_predictor\n self._use_dropout = use_dropout\n self._kernel_size = kernel_size\n self._box_code_size = box_code_size\n self._dropout_keep_prob = dropout_keep_prob\n self._apply_sigmoid_to_scores = apply_sigmoid_to_scores\n self._class_prediction_bias_init = class_prediction_bias_init\n\n def _predict(self, image_features, num_predictions_per_location):\n \"\"\"Computes encoded object locations and corresponding confidences.\n\n Args:\n image_features: A float tensor of shape [batch_size, height, width,\n channels] containing features for a batch of images.\n num_predictions_per_location: an integer representing the number of box\n predictions to be made per spatial location in the feature map.\n\n Returns:\n A dictionary containing the following tensors.\n box_encodings: A float tensor of shape [batch_size, num_anchors, 1,\n code_size] representing the location of the objects, where\n num_anchors = feat_height * feat_width * num_predictions_per_location\n class_predictions_with_background: A float tensor of shape\n [batch_size, num_anchors, num_classes + 1] representing the class\n predictions for the proposals.\n \"\"\"\n # Add a slot for the background class.\n num_class_slots = self.num_classes + 1\n net = image_features\n with slim.arg_scope(self._conv_hyperparams), \\\n slim.arg_scope([slim.dropout], is_training=self._is_training):\n # Add additional conv layers before the class predictor.\n features_depth = static_shape.get_depth(image_features.get_shape())\n depth = max(min(features_depth, self._max_depth), self._min_depth)\n tf.logging.info('depth of additional conv before box predictor: {}'.\n format(depth))\n if depth > 0 and self._num_layers_before_predictor > 0:\n for i in range(self._num_layers_before_predictor):\n net = slim.conv2d(\n net, depth, [1, 1], scope='Conv2d_%d_1x1_%d' % (i, depth))\n with slim.arg_scope([slim.conv2d], activation_fn=None,\n normalizer_fn=None, normalizer_params=None):\n box_encodings = slim.conv2d(\n net, num_predictions_per_location * self._box_code_size,\n [self._kernel_size, self._kernel_size],\n scope='BoxEncodingPredictor')\n if self._use_dropout:\n net = slim.dropout(net, keep_prob=self._dropout_keep_prob)\n class_predictions_with_background = slim.conv2d(\n net, num_predictions_per_location * num_class_slots,\n [self._kernel_size, self._kernel_size], scope='ClassPredictor',\n biases_initializer=tf.constant_initializer(\n self._class_prediction_bias_init))\n if self._apply_sigmoid_to_scores:\n class_predictions_with_background = tf.sigmoid(\n class_predictions_with_background)\n\n combined_feature_map_shape = shape_utils.combined_static_and_dynamic_shape(\n image_features)\n box_encodings = tf.reshape(\n box_encodings, 
tf.stack([combined_feature_map_shape[0],\n combined_feature_map_shape[1] *\n combined_feature_map_shape[2] *\n num_predictions_per_location,\n 1, self._box_code_size]))\n class_predictions_with_background = tf.reshape(\n class_predictions_with_background,\n tf.stack([combined_feature_map_shape[0],\n combined_feature_map_shape[1] *\n combined_feature_map_shape[2] *\n num_predictions_per_location,\n num_class_slots]))\n return {BOX_ENCODINGS: box_encodings,\n CLASS_PREDICTIONS_WITH_BACKGROUND:\n class_predictions_with_background}\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Region Similarity Calculators for BoxLists.\n\nRegion Similarity Calculators compare a pairwise measure of similarity\nbetween the boxes in two BoxLists.\n\"\"\"\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\nimport tensorflow as tf\n\nfrom app.object_detection.core import box_list_ops\n\n\nclass RegionSimilarityCalculator(object):\n \"\"\"Abstract base class for region similarity calculator.\"\"\"\n __metaclass__ = ABCMeta\n\n def compare(self, boxlist1, boxlist2, scope=None):\n \"\"\"Computes matrix of pairwise similarity between BoxLists.\n\n This op (to be overriden) computes a measure of pairwise similarity between\n the boxes in the given BoxLists. Higher values indicate more similarity.\n\n Note that this method simply measures similarity and does not explicitly\n perform a matching.\n\n Args:\n boxlist1: BoxList holding N boxes.\n boxlist2: BoxList holding M boxes.\n scope: Op scope name. Defaults to 'Compare' if None.\n\n Returns:\n a (float32) tensor of shape [N, M] with pairwise similarity score.\n \"\"\"\n with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope:\n return self._compare(boxlist1, boxlist2)\n\n @abstractmethod\n def _compare(self, boxlist1, boxlist2):\n pass\n\n\nclass IouSimilarity(RegionSimilarityCalculator):\n \"\"\"Class to compute similarity based on Intersection over Union (IOU) metric.\n\n This class computes pairwise similarity between two BoxLists based on IOU.\n \"\"\"\n\n def _compare(self, boxlist1, boxlist2):\n \"\"\"Compute pairwise IOU similarity between the two BoxLists.\n\n Args:\n boxlist1: BoxList holding N boxes.\n boxlist2: BoxList holding M boxes.\n\n Returns:\n A tensor with shape [N, M] representing pairwise iou scores.\n \"\"\"\n return box_list_ops.iou(boxlist1, boxlist2)\n\n\nclass NegSqDistSimilarity(RegionSimilarityCalculator):\n \"\"\"Class to compute similarity based on the squared distance metric.\n\n This class computes pairwise similarity between two BoxLists based on the\n negative squared distance metric.\n \"\"\"\n\n def _compare(self, boxlist1, boxlist2):\n \"\"\"Compute matrix of (negated) sq distances.\n\n Args:\n boxlist1: BoxList holding N boxes.\n boxlist2: BoxList holding M boxes.\n\n Returns:\n A tensor with shape [N, M] representing negated pairwise squared distance.\n \"\"\"\n return -1 * box_list_ops.sq_dist(boxlist1, boxlist2)\n\n\nclass IoaSimilarity(RegionSimilarityCalculator):\n \"\"\"Class to compute similarity based on Intersection over Area (IOA) metric.\n\n This class computes pairwise similarity between two BoxLists based on their\n pairwise intersections divided by the areas of second BoxLists.\n \"\"\"\n\n def _compare(self, boxlist1, boxlist2):\n \"\"\"Compute pairwise IOA similarity between the two BoxLists.\n\n Args:\n boxlist1: BoxList holding N boxes.\n boxlist2: BoxList holding 
M boxes.\n\n Returns:\n A tensor with shape [N, M] representing pairwise IOA scores.\n \"\"\"\n return box_list_ops.ioa(boxlist1, boxlist2)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.core.keypoint_ops.\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nfrom app.object_detection.core import keypoint_ops\n\n\nclass KeypointOpsTest(tf.test.TestCase):\n \"\"\"Tests for common keypoint operations.\"\"\"\n\n def test_scale(self):\n keypoints = tf.constant([\n [[0.0, 0.0], [100.0, 200.0]],\n [[50.0, 120.0], [100.0, 140.0]]\n ])\n y_scale = tf.constant(1.0 / 100)\n x_scale = tf.constant(1.0 / 200)\n\n expected_keypoints = tf.constant([\n [[0., 0.], [1.0, 1.0]],\n [[0.5, 0.6], [1.0, 0.7]]\n ])\n output = keypoint_ops.scale(keypoints, y_scale, x_scale)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_clip_to_window(self):\n keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n window = tf.constant([0.25, 0.25, 0.75, 0.75])\n\n expected_keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.25], [0.75, 0.75]]\n ])\n output = keypoint_ops.clip_to_window(keypoints, window)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_prune_outside_window(self):\n keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n window = tf.constant([0.25, 0.25, 0.75, 0.75])\n\n expected_keypoints = tf.constant([[[0.25, 0.5], [0.75, 0.75]],\n [[np.nan, np.nan], [np.nan, np.nan]]])\n output = keypoint_ops.prune_outside_window(keypoints, window)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_change_coordinate_frame(self):\n keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n window = tf.constant([0.25, 0.25, 0.75, 0.75])\n\n expected_keypoints = tf.constant([\n [[0, 0.5], [1.0, 1.0]],\n [[0.5, -0.5], [1.5, 1.5]]\n ])\n output = keypoint_ops.change_coordinate_frame(keypoints, window)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_to_normalized_coordinates(self):\n keypoints = tf.constant([\n [[10., 30.], [30., 45.]],\n [[20., 0.], [40., 60.]]\n ])\n output = keypoint_ops.to_normalized_coordinates(\n keypoints, 40, 60)\n expected_keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_to_normalized_coordinates_already_normalized(self):\n keypoints = 
tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n output = keypoint_ops.to_normalized_coordinates(\n keypoints, 40, 60)\n\n with self.test_session() as sess:\n with self.assertRaisesOpError('assertion failed'):\n sess.run(output)\n\n def test_to_absolute_coordinates(self):\n keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n output = keypoint_ops.to_absolute_coordinates(\n keypoints, 40, 60)\n expected_keypoints = tf.constant([\n [[10., 30.], [30., 45.]],\n [[20., 0.], [40., 60.]]\n ])\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_to_absolute_coordinates_already_absolute(self):\n keypoints = tf.constant([\n [[10., 30.], [30., 45.]],\n [[20., 0.], [40., 60.]]\n ])\n output = keypoint_ops.to_absolute_coordinates(\n keypoints, 40, 60)\n\n with self.test_session() as sess:\n with self.assertRaisesOpError('assertion failed'):\n sess.run(output)\n\n def test_flip_horizontal(self):\n keypoints = tf.constant([\n [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],\n [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]\n ])\n flip_permutation = [0, 2, 1]\n\n expected_keypoints = tf.constant([\n [[0.1, 0.9], [0.3, 0.7], [0.2, 0.8]],\n [[0.4, 0.6], [0.6, 0.4], [0.5, 0.5]],\n ])\n output = keypoint_ops.flip_horizontal(keypoints, 0.5, flip_permutation)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_flip_vertical(self):\n keypoints = tf.constant([\n [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],\n [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]\n ])\n flip_permutation = [0, 2, 1]\n\n expected_keypoints = tf.constant([\n [[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]],\n [[0.6, 0.4], [0.4, 0.6], [0.5, 0.5]],\n ])\n output = keypoint_ops.flip_vertical(keypoints, 0.5, flip_permutation)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_rot90(self):\n keypoints = tf.constant([\n [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],\n [[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]]\n ])\n expected_keypoints = tf.constant([\n [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]],\n [[0.4, 0.4], [0.4, 0.5], [0.3, 0.6]],\n ])\n output = keypoint_ops.rot90(keypoints)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.image.resize_bilinear",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.shape",
"tensorflow.reduce_mean",
"tensorflow.stack",
"tensorflow.reshape",
"tensorflow.sigmoid",
"tensorflow.ones",
"tensorflow.squeeze",
"tensorflow.constant_initializer",
"tensorflow.variable_scope"
],
[
"tensorflow.name_scope"
],
[
"tensorflow.constant",
"tensorflow.test.main"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
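A note on the similarity calculators in the row above: IouSimilarity delegates the pairwise IoU to box_list_ops.iou, whose implementation is not shown here. The sketch below reproduces the same pairwise computation in plain NumPy for boxes in [y_min, x_min, y_max, x_max] order; the helper name pairwise_iou is illustrative and not part of the object_detection API.

import numpy as np

def pairwise_iou(boxes1, boxes2):
    # Per-box areas: (y_max - y_min) * (x_max - x_min).
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    # Broadcast [N, 1] against [M] to get all pairwise intersections.
    y_min = np.maximum(boxes1[:, None, 0], boxes2[None, :, 0])
    x_min = np.maximum(boxes1[:, None, 1], boxes2[None, :, 1])
    y_max = np.minimum(boxes1[:, None, 2], boxes2[None, :, 2])
    x_max = np.minimum(boxes1[:, None, 3], boxes2[None, :, 3])
    intersect = np.clip(y_max - y_min, 0, None) * np.clip(x_max - x_min, 0, None)
    union = area1[:, None] + area2[None, :] - intersect
    return intersect / np.maximum(union, 1e-8)

boxes_a = np.array([[0.0, 0.0, 1.0, 1.0]])
boxes_b = np.array([[0.5, 0.5, 1.5, 1.5], [2.0, 2.0, 3.0, 3.0]])
print(pairwise_iou(boxes_a, boxes_b))  # [[0.14285714 0.        ]]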
afeedh/facenet | [
"a70159a7c9850a49acd789824273b9b8933a61e8"
] | [
"facenet/src/train_tripletloss.py"
] | [
"\"\"\"Training a face recognizer with TensorFlow based on the FaceNet paper\nFaceNet: A Unified Embedding for Face Recognition and Clustering: http://arxiv.org/abs/1503.03832\n\"\"\"\n# MIT License\n#\n# Copyright (c) 2016 David Sandberg\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport os.path\nimport time\nimport sys\nimport tensorflow as tf\nimport numpy as np\nimport importlib\nimport itertools\nimport argparse\nimport facenet.src.facenet as fc\nfrom facenet.src import lfw\n\nfrom tensorflow.python.ops import data_flow_ops\n\nfrom six.moves import xrange # @UnresolvedImport\n\n\ndef main(args):\n\n network = importlib.import_module(args.model_def)\n\n subdir = datetime.strftime(datetime.now(), \"%Y%m%d-%H%M%S\")\n log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)\n if not os.path.isdir(log_dir): # Create the log directory if it doesn't exist\n os.makedirs(log_dir)\n model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)\n if not os.path.isdir(model_dir): # Create the model directory if it doesn't exist\n os.makedirs(model_dir)\n\n # Write arguments to a text file\n fc.write_arguments_to_file(args, os.path.join(log_dir, \"arguments.txt\"))\n\n # Store some git revision info in a text file in the log directory\n src_path, _ = os.path.split(os.path.realpath(__file__))\n fc.store_revision_info(src_path, log_dir, \" \".join(sys.argv))\n\n np.random.seed(seed=args.seed)\n train_set = fc.get_dataset(args.data_dir)\n\n print(\"Model directory: %s\" % model_dir)\n print(\"Log directory: %s\" % log_dir)\n if args.pretrained_model:\n print(\"Pre-trained model: %s\" % os.path.expanduser(args.pretrained_model))\n\n if args.lfw_dir:\n print(\"LFW directory: %s\" % args.lfw_dir)\n # Read the file containing the pairs used for testing\n pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))\n # Get the paths for the corresponding images\n lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)\n\n with tf.Graph().as_default():\n tf.set_random_seed(args.seed)\n global_step = tf.Variable(0, trainable=False)\n\n # Placeholder for the learning rate\n learning_rate_placeholder = tf.placeholder(tf.float32, name=\"learning_rate\")\n\n batch_size_placeholder = tf.placeholder(tf.int32, name=\"batch_size\")\n\n phase_train_placeholder = tf.placeholder(tf.bool, 
name=\"phase_train\")\n\n image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 3), name=\"image_paths\")\n labels_placeholder = tf.placeholder(tf.int64, shape=(None, 3), name=\"labels\")\n\n input_queue = data_flow_ops.FIFOQueue(\n capacity=100000, dtypes=[tf.string, tf.int64], shapes=[(3,), (3,)], shared_name=None, name=None\n )\n enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder])\n\n nrof_preprocess_threads = 4\n images_and_labels = []\n for _ in range(nrof_preprocess_threads):\n filenames, label = input_queue.dequeue()\n images = []\n for filename in tf.unstack(filenames):\n file_contents = tf.read_file(filename)\n image = tf.image.decode_image(file_contents, channels=3)\n\n if args.random_crop:\n image = tf.random_crop(image, [args.image_size, args.image_size, 3])\n else:\n image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)\n if args.random_flip:\n image = tf.image.random_flip_left_right(image)\n\n # pylint: disable=no-member\n image.set_shape((args.image_size, args.image_size, 3))\n images.append(tf.image.per_image_standardization(image))\n images_and_labels.append([images, label])\n\n image_batch, labels_batch = tf.train.batch_join(\n images_and_labels,\n batch_size=batch_size_placeholder,\n shapes=[(args.image_size, args.image_size, 3), ()],\n enqueue_many=True,\n capacity=4 * nrof_preprocess_threads * args.batch_size,\n allow_smaller_final_batch=True,\n )\n image_batch = tf.identity(image_batch, \"image_batch\")\n image_batch = tf.identity(image_batch, \"input\")\n labels_batch = tf.identity(labels_batch, \"label_batch\")\n\n # Build the inference graph\n prelogits, _ = network.inference(\n image_batch,\n args.keep_probability,\n phase_train=phase_train_placeholder,\n bottleneck_layer_size=args.embedding_size,\n weight_decay=args.weight_decay,\n )\n\n embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name=\"embeddings\")\n # Split embeddings into anchor, positive and negative and calculate triplet loss\n anchor, positive, negative = tf.unstack(tf.reshape(embeddings, [-1, 3, args.embedding_size]), 3, 1)\n triplet_loss = fc.triplet_loss(anchor, positive, negative, args.alpha)\n\n learning_rate = tf.train.exponential_decay(\n learning_rate_placeholder,\n global_step,\n args.learning_rate_decay_epochs * args.epoch_size,\n args.learning_rate_decay_factor,\n staircase=True,\n )\n tf.summary.scalar(\"learning_rate\", learning_rate)\n\n # Calculate the total losses\n regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n total_loss = tf.add_n([triplet_loss] + regularization_losses, name=\"total_loss\")\n\n # Build a Graph that trains the model with one batch of examples and updates the model parameters\n train_op = fc.train(\n total_loss, global_step, args.optimizer, learning_rate, args.moving_average_decay, tf.global_variables()\n )\n\n # Create a saver\n saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.summary.merge_all()\n\n # Start running operations on the Graph.\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n # Initialize variables\n sess.run(tf.global_variables_initializer(), feed_dict={phase_train_placeholder: True})\n sess.run(tf.local_variables_initializer(), feed_dict={phase_train_placeholder: True})\n\n summary_writer = tf.summary.FileWriter(log_dir, 
sess.graph)\n coord = tf.train.Coordinator()\n tf.train.start_queue_runners(coord=coord, sess=sess)\n\n with sess.as_default():\n\n if args.pretrained_model:\n print(\"Restoring pretrained model: %s\" % args.pretrained_model)\n saver.restore(sess, os.path.expanduser(args.pretrained_model))\n\n # Training and validation loop\n epoch = 0\n while epoch < args.max_nrof_epochs:\n step = sess.run(global_step, feed_dict=None)\n epoch = step // args.epoch_size\n # Train for one epoch\n train(\n args,\n sess,\n train_set,\n epoch,\n image_paths_placeholder,\n labels_placeholder,\n labels_batch,\n batch_size_placeholder,\n learning_rate_placeholder,\n phase_train_placeholder,\n enqueue_op,\n input_queue,\n global_step,\n embeddings,\n total_loss,\n train_op,\n summary_op,\n summary_writer,\n args.learning_rate_schedule_file,\n args.embedding_size,\n anchor,\n positive,\n negative,\n triplet_loss,\n )\n\n # Save variables and the metagraph if it doesn't exist already\n save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)\n\n # Evaluate on LFW\n if args.lfw_dir:\n evaluate(\n sess,\n lfw_paths,\n embeddings,\n labels_batch,\n image_paths_placeholder,\n labels_placeholder,\n batch_size_placeholder,\n learning_rate_placeholder,\n phase_train_placeholder,\n enqueue_op,\n actual_issame,\n args.batch_size,\n args.lfw_nrof_folds,\n log_dir,\n step,\n summary_writer,\n args.embedding_size,\n )\n\n return model_dir\n\n\ndef train(\n args,\n sess,\n dataset,\n epoch,\n image_paths_placeholder,\n labels_placeholder,\n labels_batch,\n batch_size_placeholder,\n learning_rate_placeholder,\n phase_train_placeholder,\n enqueue_op,\n input_queue,\n global_step,\n embeddings,\n loss,\n train_op,\n summary_op,\n summary_writer,\n learning_rate_schedule_file,\n embedding_size,\n anchor,\n positive,\n negative,\n triplet_loss,\n):\n batch_number = 0\n\n if args.learning_rate > 0.0:\n lr = args.learning_rate\n else:\n lr = fc.get_learning_rate_from_file(learning_rate_schedule_file, epoch)\n while batch_number < args.epoch_size:\n # Sample people randomly from the dataset\n image_paths, num_per_class = sample_people(dataset, args.people_per_batch, args.images_per_person)\n\n print(\"Running forward pass on sampled images: \", end=\"\")\n start_time = time.time()\n nrof_examples = args.people_per_batch * args.images_per_person\n labels_array = np.reshape(np.arange(nrof_examples), (-1, 3))\n image_paths_array = np.reshape(np.expand_dims(np.array(image_paths), 1), (-1, 3))\n sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})\n emb_array = np.zeros((nrof_examples, embedding_size))\n nrof_batches = int(np.ceil(nrof_examples / args.batch_size))\n for i in range(nrof_batches):\n batch_size = min(nrof_examples - i * args.batch_size, args.batch_size)\n emb, lab = sess.run(\n [embeddings, labels_batch],\n feed_dict={\n batch_size_placeholder: batch_size,\n learning_rate_placeholder: lr,\n phase_train_placeholder: True,\n },\n )\n emb_array[lab, :] = emb\n print(\"%.3f\" % (time.time() - start_time))\n\n # Select triplets based on the embeddings\n print(\"Selecting suitable triplets for training\")\n triplets, nrof_random_negs, nrof_triplets = select_triplets(\n emb_array, num_per_class, image_paths, args.people_per_batch, args.alpha\n )\n selection_time = time.time() - start_time\n print(\n \"(nrof_random_negs, nrof_triplets) = (%d, %d): time=%.3f seconds\"\n % (nrof_random_negs, nrof_triplets, selection_time)\n )\n\n # Perform training on the selected 
triplets\n nrof_batches = int(np.ceil(nrof_triplets * 3 / args.batch_size))\n triplet_paths = list(itertools.chain(*triplets))\n labels_array = np.reshape(np.arange(len(triplet_paths)), (-1, 3))\n triplet_paths_array = np.reshape(np.expand_dims(np.array(triplet_paths), 1), (-1, 3))\n sess.run(enqueue_op, {image_paths_placeholder: triplet_paths_array, labels_placeholder: labels_array})\n nrof_examples = len(triplet_paths)\n train_time = 0\n i = 0\n emb_array = np.zeros((nrof_examples, embedding_size))\n loss_array = np.zeros((nrof_triplets,))\n summary = tf.Summary()\n step = 0\n while i < nrof_batches:\n start_time = time.time()\n batch_size = min(nrof_examples - i * args.batch_size, args.batch_size)\n feed_dict = {\n batch_size_placeholder: batch_size,\n learning_rate_placeholder: lr,\n phase_train_placeholder: True,\n }\n err, _, step, emb, lab = sess.run(\n [loss, train_op, global_step, embeddings, labels_batch], feed_dict=feed_dict\n )\n emb_array[lab, :] = emb\n loss_array[i] = err\n duration = time.time() - start_time\n print(\n \"Epoch: [%d][%d/%d]\\tTime %.3f\\tLoss %2.3f\" % (epoch, batch_number + 1, args.epoch_size, duration, err)\n )\n batch_number += 1\n i += 1\n train_time += duration\n summary.value.add(tag=\"loss\", simple_value=err)\n\n # Add validation loss and accuracy to summary\n # pylint: disable=maybe-no-member\n summary.value.add(tag=\"time/selection\", simple_value=selection_time)\n summary_writer.add_summary(summary, step)\n return step\n\n\ndef select_triplets(embeddings, nrof_images_per_class, image_paths, people_per_batch, alpha):\n \"\"\"Select the triplets for training\"\"\"\n trip_idx = 0\n emb_start_idx = 0\n num_trips = 0\n triplets = []\n\n # VGG Face: Choosing good triplets is crucial and should strike a balance between\n # selecting informative (i.e. challenging) examples and swamping training with examples that\n # are too hard. This is achieved by extending each pair (a, p) to a triplet (a, p, n) by sampling\n # the image n at random, but only between the ones that violate the triplet loss margin.
The\n # latter is a form of hard-negative mining, but it is not as aggressive (and much cheaper) than\n # choosing the maximally violating example, as often done in structured output learning.\n\n for i in xrange(people_per_batch):\n nrof_images = int(nrof_images_per_class[i])\n for j in xrange(1, nrof_images):\n a_idx = emb_start_idx + j - 1\n neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)\n for pair in xrange(j, nrof_images): # For every possible positive pair.\n p_idx = emb_start_idx + pair\n pos_dist_sqr = np.sum(np.square(embeddings[a_idx] - embeddings[p_idx]))\n neg_dists_sqr[emb_start_idx : emb_start_idx + nrof_images] = np.NaN\n # all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<alpha, pos_dist_sqr<neg_dists_sqr))[0] # FaceNet selection\n all_neg = np.where(neg_dists_sqr - pos_dist_sqr < alpha)[0] # VGG Face selection\n nrof_random_negs = all_neg.shape[0]\n if nrof_random_negs > 0:\n rnd_idx = np.random.randint(nrof_random_negs)\n n_idx = all_neg[rnd_idx]\n triplets.append((image_paths[a_idx], image_paths[p_idx], image_paths[n_idx]))\n # print('Triplet %d: (%d, %d, %d), pos_dist=%2.6f, neg_dist=%2.6f (%d, %d, %d, %d, %d)' %\n # (trip_idx, a_idx, p_idx, n_idx, pos_dist_sqr, neg_dists_sqr[n_idx], nrof_random_negs, rnd_idx, i, j, emb_start_idx))\n trip_idx += 1\n\n num_trips += 1\n\n emb_start_idx += nrof_images\n\n np.random.shuffle(triplets)\n return triplets, num_trips, len(triplets)\n\n\ndef sample_people(dataset, people_per_batch, images_per_person):\n nrof_images = people_per_batch * images_per_person\n\n # Sample classes from the dataset\n nrof_classes = len(dataset)\n class_indices = np.arange(nrof_classes)\n np.random.shuffle(class_indices)\n\n i = 0\n image_paths = []\n num_per_class = []\n sampled_class_indices = []\n # Sample images from these classes until we have enough\n while len(image_paths) < nrof_images:\n class_index = class_indices[i]\n nrof_images_in_class = len(dataset[class_index])\n image_indices = np.arange(nrof_images_in_class)\n np.random.shuffle(image_indices)\n nrof_images_from_class = min(nrof_images_in_class, images_per_person, nrof_images - len(image_paths))\n idx = image_indices[0:nrof_images_from_class]\n image_paths_for_class = [dataset[class_index].image_paths[j] for j in idx]\n sampled_class_indices += [class_index] * nrof_images_from_class\n image_paths += image_paths_for_class\n num_per_class.append(nrof_images_from_class)\n i += 1\n\n return image_paths, num_per_class\n\n\ndef evaluate(\n sess,\n image_paths,\n embeddings,\n labels_batch,\n image_paths_placeholder,\n labels_placeholder,\n batch_size_placeholder,\n learning_rate_placeholder,\n phase_train_placeholder,\n enqueue_op,\n actual_issame,\n batch_size,\n nrof_folds,\n log_dir,\n step,\n summary_writer,\n embedding_size,\n):\n start_time = time.time()\n # Run forward pass to calculate embeddings\n print(\"Running forward pass on LFW images: \", end=\"\")\n\n nrof_images = len(actual_issame) * 2\n assert len(image_paths) == nrof_images\n labels_array = np.reshape(np.arange(nrof_images), (-1, 3))\n image_paths_array = np.reshape(np.expand_dims(np.array(image_paths), 1), (-1, 3))\n sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})\n emb_array = np.zeros((nrof_images, embedding_size))\n nrof_batches = int(np.ceil(nrof_images / batch_size))\n label_check_array = np.zeros((nrof_images,))\n for i in xrange(nrof_batches):\n batch_size = min(nrof_images - i * batch_size, batch_size)\n emb, lab = sess.run(\n
[embeddings, labels_batch],\n feed_dict={\n batch_size_placeholder: batch_size,\n learning_rate_placeholder: 0.0,\n phase_train_placeholder: False,\n },\n )\n emb_array[lab, :] = emb\n label_check_array[lab] = 1\n print(\"%.3f\" % (time.time() - start_time))\n\n assert np.all(label_check_array == 1)\n\n _, _, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame, nrof_folds=nrof_folds)\n\n print(\"Accuracy: %1.3f+-%1.3f\" % (np.mean(accuracy), np.std(accuracy)))\n print(\"Validation rate: %2.5f+-%2.5f @ FAR=%2.5f\" % (val, val_std, far))\n lfw_time = time.time() - start_time\n # Add validation loss and accuracy to summary\n summary = tf.Summary()\n # pylint: disable=maybe-no-member\n summary.value.add(tag=\"lfw/accuracy\", simple_value=np.mean(accuracy))\n summary.value.add(tag=\"lfw/val_rate\", simple_value=val)\n summary.value.add(tag=\"time/lfw\", simple_value=lfw_time)\n summary_writer.add_summary(summary, step)\n with open(os.path.join(log_dir, \"lfw_result.txt\"), \"at\") as f:\n f.write(\"%d\\t%.5f\\t%.5f\\n\" % (step, np.mean(accuracy), val))\n\n\ndef save_variables_and_metagraph(sess, saver, summary_writer, model_dir, model_name, step):\n # Save the model checkpoint\n print(\"Saving variables\")\n start_time = time.time()\n checkpoint_path = os.path.join(model_dir, \"model-%s.ckpt\" % model_name)\n saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)\n save_time_variables = time.time() - start_time\n print(\"Variables saved in %.2f seconds\" % save_time_variables)\n metagraph_filename = os.path.join(model_dir, \"model-%s.meta\" % model_name)\n save_time_metagraph = 0\n if not os.path.exists(metagraph_filename):\n print(\"Saving metagraph\")\n start_time = time.time()\n saver.export_meta_graph(metagraph_filename)\n save_time_metagraph = time.time() - start_time\n print(\"Metagraph saved in %.2f seconds\" % save_time_metagraph)\n summary = tf.Summary()\n # pylint: disable=maybe-no-member\n summary.value.add(tag=\"time/save_variables\", simple_value=save_time_variables)\n summary.value.add(tag=\"time/save_metagraph\", simple_value=save_time_metagraph)\n summary_writer.add_summary(summary, step)\n\n\ndef get_learning_rate_from_file(filename, epoch):\n with open(filename, \"r\") as f:\n for line in f.readlines():\n line = line.split(\"#\", 1)[0]\n if line:\n par = line.strip().split(\":\")\n e = int(par[0])\n lr = float(par[1])\n if e <= epoch:\n learning_rate = lr\n else:\n return learning_rate\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--logs_base_dir\", type=str, help=\"Directory where to write event logs.\", default=\"~/logs/facenet\"\n )\n parser.add_argument(\n \"--models_base_dir\",\n type=str,\n help=\"Directory where to write trained models and checkpoints.\",\n default=\"~/models/facenet\",\n )\n parser.add_argument(\n \"--gpu_memory_fraction\",\n type=float,\n help=\"Upper bound on the amount of GPU memory that will be used by the process.\",\n default=1.0,\n )\n parser.add_argument(\"--pretrained_model\", type=str, help=\"Load a pretrained model before training starts.\")\n parser.add_argument(\n \"--data_dir\",\n type=str,\n help=\"Path to the data directory containing aligned face patches.\",\n default=\"~/datasets/casia/casia_maxpy_mtcnnalign_182_160\",\n )\n parser.add_argument(\n \"--model_def\",\n type=str,\n help=\"Model definition. 
Points to a module containing the definition of the inference graph.\",\n default=\"models.inception_resnet_v1\",\n )\n parser.add_argument(\"--max_nrof_epochs\", type=int, help=\"Number of epochs to run.\", default=500)\n parser.add_argument(\"--batch_size\", type=int, help=\"Number of images to process in a batch.\", default=90)\n parser.add_argument(\"--image_size\", type=int, help=\"Image size (height, width) in pixels.\", default=160)\n parser.add_argument(\"--people_per_batch\", type=int, help=\"Number of people per batch.\", default=45)\n parser.add_argument(\"--images_per_person\", type=int, help=\"Number of images per person.\", default=40)\n parser.add_argument(\"--epoch_size\", type=int, help=\"Number of batches per epoch.\", default=1000)\n parser.add_argument(\"--alpha\", type=float, help=\"Positive to negative triplet distance margin.\", default=0.2)\n parser.add_argument(\"--embedding_size\", type=int, help=\"Dimensionality of the embedding.\", default=128)\n parser.add_argument(\n \"--random_crop\",\n help=\"Performs random cropping of training images. If false, the center image_size pixels from the training images are used. \"\n + \"If the size of the images in the data directory is equal to image_size no cropping is performed\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--random_flip\", help=\"Performs random horizontal flipping of training images.\", action=\"store_true\"\n )\n parser.add_argument(\n \"--keep_probability\",\n type=float,\n help=\"Keep probability of dropout for the fully connected layer(s).\",\n default=1.0,\n )\n parser.add_argument(\"--weight_decay\", type=float, help=\"L2 weight regularization.\", default=0.0)\n parser.add_argument(\n \"--optimizer\",\n type=str,\n choices=[\"ADAGRAD\", \"ADADELTA\", \"ADAM\", \"RMSPROP\", \"MOM\"],\n help=\"The optimization algorithm to use\",\n default=\"ADAGRAD\",\n )\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n help=\"Initial learning rate. If set to a negative value a learning rate \"\n + 'schedule can be specified in the file \"learning_rate_schedule.txt\"',\n default=0.1,\n )\n parser.add_argument(\n \"--learning_rate_decay_epochs\", type=int, help=\"Number of epochs between learning rate decay.\", default=100\n )\n parser.add_argument(\"--learning_rate_decay_factor\", type=float, help=\"Learning rate decay factor.\", default=1.0)\n parser.add_argument(\n \"--moving_average_decay\",\n type=float,\n help=\"Exponential decay for tracking of training parameters.\",\n default=0.9999,\n )\n parser.add_argument(\"--seed\", type=int, help=\"Random seed.\", default=666)\n parser.add_argument(\n \"--learning_rate_schedule_file\",\n type=str,\n help=\"File containing the learning rate schedule that is used when learning_rate is set to -1.\",\n default=\"data/learning_rate_schedule.txt\",\n )\n\n # Parameters for validation on LFW\n parser.add_argument(\n \"--lfw_pairs\", type=str, help=\"The file containing the pairs to use for validation.\", default=\"data/pairs.txt\"\n )\n parser.add_argument(\n \"--lfw_dir\", type=str, help=\"Path to the data directory containing aligned face patches.\", default=\"\"\n )\n parser.add_argument(\n \"--lfw_nrof_folds\",\n type=int,\n help=\"Number of folds to use for cross validation. Mainly used for testing.\",\n default=10,\n )\n return parser.parse_args(argv)\n\n\nif __name__ == \"__main__\":\n main(parse_arguments(sys.argv[1:]))\n"
] | [
[
"tensorflow.global_variables",
"numpy.all",
"tensorflow.GPUOptions",
"numpy.mean",
"tensorflow.image.decode_image",
"tensorflow.summary.scalar",
"tensorflow.add_n",
"numpy.where",
"numpy.random.randint",
"numpy.square",
"tensorflow.Graph",
"tensorflow.image.random_flip_left_right",
"tensorflow.Variable",
"tensorflow.read_file",
"tensorflow.get_collection",
"numpy.arange",
"tensorflow.train.exponential_decay",
"tensorflow.ConfigProto",
"numpy.ceil",
"numpy.std",
"tensorflow.trainable_variables",
"tensorflow.Summary",
"numpy.zeros",
"tensorflow.nn.l2_normalize",
"tensorflow.unstack",
"tensorflow.train.Coordinator",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.image.per_image_standardization",
"tensorflow.set_random_seed",
"numpy.array",
"tensorflow.python.ops.data_flow_ops.FIFOQueue",
"tensorflow.image.resize_image_with_crop_or_pad",
"tensorflow.summary.FileWriter",
"tensorflow.local_variables_initializer",
"numpy.random.seed",
"tensorflow.train.start_queue_runners",
"tensorflow.reshape",
"numpy.random.shuffle",
"tensorflow.train.batch_join",
"tensorflow.random_crop"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
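The select_triplets routine in the facenet row above implements the VGG-Face-style selection its comments describe: for each anchor-positive pair, a negative is drawn at random from the examples that violate the margin. A condensed NumPy sketch of that rule follows; pick_negative and class_slice are illustrative names, not part of the facenet code.

import numpy as np

def pick_negative(embeddings, a_idx, p_idx, class_slice, alpha=0.2):
    # Squared distance from the anchor to every embedding in the batch.
    neg_dists = np.sum(np.square(embeddings[a_idx] - embeddings), axis=1)
    pos_dist = np.sum(np.square(embeddings[a_idx] - embeddings[p_idx]))
    # Mask out the anchor's own class; NaN never satisfies the comparison below.
    neg_dists[class_slice] = np.nan
    # Margin-violating negatives: those closer than pos_dist + alpha.
    candidates = np.where(neg_dists - pos_dist < alpha)[0]
    return np.random.choice(candidates) if candidates.size else None

emb = np.random.randn(12, 128)
emb /= np.linalg.norm(emb, axis=1, keepdims=True)  # unit embeddings, as FaceNet produces
print(pick_negative(emb, a_idx=0, p_idx=1, class_slice=slice(0, 4)))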
preylol/pbrt-v3 | [
"29661bf4caf9e2df2bf21f2a28ac8e53b2439f9f"
] | [
"evaluation/scripts/python-scripts/exr2png.py"
] | [
"import os\nimport sys\nimport pyexr\nimport numpy as np\nfrom PIL import Image\nimport re\n \ndef exec():\n filepaths = []\n savepaths = []\n images = []\n maxvalues = []\n # Prep variable\n filelist = os.listdir(\"output\")\n for file in filelist:\n if file.endswith(\".exr\"):\n filepath = os.path.join(\"output\", file)\n savepath = sys.argv[0][:-len(\"exr2png.py\")] + \"../../plots/renders/\"\n image = pyexr.open(filepath).get()\n images.append(image)\n maxvalues.append(np.max(image))\n filepaths.append(filepath)\n scenename = re.match(r\".*(crown|measure-one|villa|killeroo|hair|ecosys|landscape).*\", file)[1]\n savepaths.append(savepath + scenename + \".png\")\n for i in range(len(images)):\n #images[i] *= 16 / maxvalues[i]\n images[i] = np.where(images[i]<=0.0031308,12.92 * images[i], 1.055*(images[i]**(1/2.4)) - 0.055)\n images[i] = np.clip(images[i], 0, 1)\n images[i] = (images[i] * 255).astype(np.uint8)\n Image.fromarray(images[i]).save(savepaths[i])\n \nexec()\n"
] | [
[
"numpy.max",
"numpy.where",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
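exr2png.py in the row above tone-maps linear EXR radiance to 8-bit PNG with the standard piecewise sRGB transfer function before clipping and quantizing. The same curve in isolation, as a small sketch independent of the script:

import numpy as np

def linear_to_srgb(x):
    # Linear segment below 0.0031308, gamma-1/2.4 segment above --
    # the same np.where expression used in exr2png.py.
    return np.where(x <= 0.0031308, 12.92 * x, 1.055 * x ** (1 / 2.4) - 0.055)

linear = np.array([0.0, 0.001, 0.18, 1.0])
print((np.clip(linear_to_srgb(linear), 0, 1) * 255).astype(np.uint8))  # [  0   3 117 255]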
DVM000/keras-yolo3 | [
"ef0baf2ce19b8b637e5535d04ead48020caa06c5"
] | [
"train.py"
] | [
"#! /usr/bin/env python\n\nimport argparse\nimport os\nimport numpy as np\nimport json\nfrom voc import parse_voc_annotation\nfrom yolo import create_yolov3_model, dummy_loss\nfrom generator import BatchGenerator\nfrom utils.utils import normalize, evaluate, makedirs\nfrom tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau\nfrom tensorflow.keras.optimizers import Adam\nfrom callbacks import CustomModelCheckpoint, CustomTensorBoard\nfrom utils.multi_gpu_model import multi_gpu_model\nimport tensorflow as tf\nimport tensorflow.keras\nfrom tensorflow.keras.models import load_model\n\n\nconfig = tf.compat.v1.ConfigProto(\n gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.9)\n # device_count = {'GPU': 1}\n)\nconfig.gpu_options.allow_growth = True\nsession = tf.compat.v1.Session(config=config)\ntf.compat.v1.keras.backend.set_session(session)\n\n\n'''def prevent_GPU_overflow( ):\n gpu_devices = tf.config.experimental.list_physical_devices('GPU')\n if len(gpu_devices):\n tf.config.experimental.set_memory_growth(gpu_devices[0], True)\n\nprevent_GPU_overflow( )\n\ndef divide_GPU( LIMIT=4096 ):\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n # Restrict TensorFlow to only allocate 1GB of memory on the first GPU\n try:\n tf.config.experimental.set_virtual_device_configuration(\n gpus[0],\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=LIMIT),\n tf.config.experimental.VirtualDeviceConfiguration(memory_limit=LIMIT)])\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Virtual devices must be set before GPUs have been initialized\n print(e)\n\ndivide_GPU( LIMIT=8192 )'''\n\n\ndef create_training_instances(\n train_annot_folder,\n train_image_folder,\n train_cache,\n valid_annot_folder,\n valid_image_folder,\n valid_cache,\n labels,\n):\n # parse annotations of the training set\n train_ints, train_labels = parse_voc_annotation(train_annot_folder, train_image_folder, train_cache, labels)\n\n # parse annotations of the validation set, if any, otherwise split the training set\n if os.path.exists(valid_annot_folder):\n valid_ints, valid_labels = parse_voc_annotation(valid_annot_folder, valid_image_folder, valid_cache, labels)\n else:\n print(\"valid_annot_folder not exists. Spliting the trainining set.\")\n\n train_valid_split = int(0.8*len(train_ints))\n np.random.seed(0)\n np.random.shuffle(train_ints)\n np.random.seed()\n\n valid_ints = train_ints[train_valid_split:]\n train_ints = train_ints[:train_valid_split]\n\n # compare the seen labels with the given labels in config.json\n if len(labels) > 0:\n overlap_labels = set(labels).intersection(set(train_labels.keys()))\n\n print('Seen labels: \\t' + str(train_labels) + '\\n')\n print('Given labels: \\t' + str(labels))\n\n # return None, None, None if some given label is not in the dataset\n if len(overlap_labels) < len(labels):\n print('Some labels have no annotations! Please revise the list of labels in the config.json.')\n return None, None, None\n else:\n print('No labels are provided. 
Train on all seen labels.')\n print(train_labels)\n labels = train_labels.keys()\n\n max_box_per_image = max([len(inst['object']) for inst in (train_ints + valid_ints)])\n\n return train_ints, valid_ints, sorted(labels), max_box_per_image\n\ndef create_callbacks(saved_weights_name, tensorboard_logs, model_to_save):\n makedirs(tensorboard_logs)\n \n early_stop = EarlyStopping(\n monitor = 'loss', \n min_delta = 0.01, \n patience = 7, \n mode = 'min', \n verbose = 1\n )\n checkpoint = CustomModelCheckpoint(\n model_to_save = model_to_save,\n filepath = saved_weights_name,# + '{epoch:02d}.h5', \n monitor = 'loss', \n verbose = 1, \n save_best_only = True, #False \n mode = 'min', \n period = 1\n )\n reduce_on_plateau = ReduceLROnPlateau(\n monitor = 'loss',\n factor = 0.1,\n patience = 2,\n verbose = 1,\n mode = 'min',\n epsilon = 0.01,\n cooldown = 0,\n min_lr = 0\n )\n tensorboard = CustomTensorBoard(\n log_dir = tensorboard_logs,\n write_graph = True,\n write_images = True,\n ) \n stop_on_nan = tf.keras.callbacks.TerminateOnNaN()\n return [early_stop, checkpoint, reduce_on_plateau, tensorboard, stop_on_nan]\n\ndef create_model(\n nb_class, \n anchors, \n max_box_per_image, \n max_grid, batch_size, \n warmup_batches, \n ignore_thresh, \n multi_gpu, \n saved_weights_name, \n lr,\n grid_scales,\n obj_scale,\n noobj_scale,\n xywh_scale,\n class_scale \n):\n if multi_gpu > 1:\n with tf.device('/cpu:0'):\n template_model, infer_model = create_yolov3_model(\n nb_class = nb_class, \n anchors = anchors, \n max_box_per_image = max_box_per_image, \n max_grid = max_grid, \n batch_size = batch_size//multi_gpu, \n warmup_batches = warmup_batches,\n ignore_thresh = ignore_thresh,\n grid_scales = grid_scales,\n obj_scale = obj_scale,\n noobj_scale = noobj_scale,\n xywh_scale = xywh_scale,\n class_scale = class_scale\n )\n else:\n template_model, infer_model = create_yolov3_model(\n nb_class = nb_class, \n anchors = anchors, \n max_box_per_image = max_box_per_image, \n max_grid = max_grid, \n batch_size = batch_size, \n warmup_batches = warmup_batches,\n ignore_thresh = ignore_thresh,\n grid_scales = grid_scales,\n obj_scale = obj_scale,\n noobj_scale = noobj_scale,\n xywh_scale = xywh_scale,\n class_scale = class_scale\n ) \n\n # load the pretrained weight if exists, otherwise load the backend weight only\n if os.path.exists(saved_weights_name): \n print(\"\\nLoading pretrained weights.\\n\")\n template_model.load_weights(saved_weights_name)\n else:\n template_model.load_weights(\"backend.h5\", by_name=True) \n\n if multi_gpu > 1:\n train_model = multi_gpu_model(template_model, gpus=multi_gpu)\n else:\n train_model = template_model \n\n #optimizer = Adam(lr=lr, clipnorm=0.001)\n optimizer = tensorflow.keras.optimizers.RMSprop(lr=lr)\n train_model.compile(loss=dummy_loss, optimizer=optimizer) \n\n return train_model, infer_model\n\ndef _main_(args):\n config_path = args.conf\n\n with open(config_path) as config_buffer: \n config = json.loads(config_buffer.read())\n\n ###############################\n # Parse the annotations \n ###############################\n train_ints, valid_ints, labels, max_box_per_image = create_training_instances(\n config['train']['train_annot_folder'],\n config['train']['train_image_folder'],\n config['train']['cache_name'],\n config['valid']['valid_annot_folder'],\n config['valid']['valid_image_folder'],\n config['valid']['cache_name'],\n config['model']['labels']\n )\n print('\\nTraining on: \\t' + str(labels) + '\\n')\n\n ###############################\n # Create the generators 
\n ############################### \n train_generator = BatchGenerator(\n instances = train_ints, \n anchors = config['model']['anchors'], \n labels = labels, \n downsample = 32, # ratio between network input's size and network output's size, 32 for YOLOv3\n max_box_per_image = max_box_per_image,\n batch_size = config['train']['batch_size'],\n min_net_size = config['model']['min_input_size'],\n max_net_size = config['model']['max_input_size'], \n shuffle = True, \n jitter = 0.3, \n norm = normalize\n )\n \n valid_generator = BatchGenerator(\n instances = valid_ints, \n anchors = config['model']['anchors'], \n labels = labels, \n downsample = 32, # ratio between network input's size and network output's size, 32 for YOLOv3\n max_box_per_image = max_box_per_image,\n batch_size = config['train']['batch_size'],\n min_net_size = config['model']['min_input_size'],\n max_net_size = config['model']['max_input_size'], \n shuffle = True, \n jitter = 0.0, \n norm = normalize\n )\n\n ###############################\n # Create the model \n ###############################\n if os.path.exists(config['train']['saved_weights_name']): \n config['train']['warmup_epochs'] = 0\n warmup_batches = config['train']['warmup_epochs'] * (config['train']['train_times']*len(train_generator)) \n\n os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']\n multi_gpu = len(config['train']['gpus'].split(','))\n\n train_model, infer_model = create_model(\n nb_class = len(labels), \n anchors = config['model']['anchors'], \n max_box_per_image = max_box_per_image, \n max_grid = [config['model']['max_input_size'], config['model']['max_input_size']], \n batch_size = config['train']['batch_size'], \n warmup_batches = warmup_batches,\n ignore_thresh = config['train']['ignore_thresh'],\n multi_gpu = multi_gpu,\n saved_weights_name = config['train']['saved_weights_name'],\n lr = config['train']['learning_rate'],\n grid_scales = config['train']['grid_scales'],\n obj_scale = config['train']['obj_scale'],\n noobj_scale = config['train']['noobj_scale'],\n xywh_scale = config['train']['xywh_scale'],\n class_scale = config['train']['class_scale'],\n )\n\n ###############################\n # Kick off the training\n ###############################\n callbacks = create_callbacks(config['train']['saved_weights_name'], config['train']['tensorboard_dir'], infer_model)\n\n ### DELIA\n #train_model.summary()\n #tf.keras.backend.set_learning_phase(1) \n #tf.compat.v1.disable_eager_execution()\n tf.compat.v1.keras.backend.get_session().run(tf.compat.v1.global_variables_initializer())\n #######\n\n train_model.fit_generator(\n generator = train_generator, \n steps_per_epoch = len(train_generator) * config['train']['train_times'], \n epochs = config['train']['nb_epochs'] + config['train']['warmup_epochs'], \n verbose = 2 if config['train']['debug'] else 1,\n callbacks = callbacks, \n #workers = 4,\n #max_queue_size = 8\n )\n\n # make a GPU version of infer_model for evaluation\n if multi_gpu > 1:\n infer_model = load_model(config['train']['saved_weights_name'])\n\n ###############################\n # Run the evaluation\n ############################### \n # compute mAP for all the classes\n average_precisions = evaluate(infer_model, valid_generator)\n\n # print the score\n for label, average_precision in average_precisions.items():\n print(labels[label] + ': {:.4f}'.format(average_precision))\n print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions))) \n\nif __name__ == '__main__':\n argparser = 
argparse.ArgumentParser(description='train and evaluate YOLO_v3 model on any dataset')\n argparser.add_argument('-c', '--conf', help='path to configuration file') \n\n args = argparser.parse_args()\n _main_(args)\n"
] | [
[
"tensorflow.keras.models.load_model",
"tensorflow.device",
"tensorflow.compat.v1.GPUOptions",
"numpy.random.seed",
"tensorflow.compat.v1.keras.backend.set_session",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.compat.v1.keras.backend.get_session",
"numpy.random.shuffle",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"tensorflow.keras.callbacks.TerminateOnNaN",
"tensorflow.keras.callbacks.EarlyStopping"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
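create_training_instances in the keras-yolo3 row above falls back to an 80/20 split of the training annotations when no validation folder exists, seeding NumPy so the split is reproducible and then reseeding from entropy. A condensed sketch of that split logic (split_train_valid is an illustrative name, not part of train.py):

import numpy as np

def split_train_valid(instances, train_fraction=0.8, seed=0):
    # Deterministic shuffle so every run produces the same split.
    np.random.seed(seed)
    np.random.shuffle(instances)
    np.random.seed()  # restore nondeterministic seeding afterwards
    cut = int(train_fraction * len(instances))
    return instances[:cut], instances[cut:]

train, valid = split_train_valid(list(range(10)))
print(len(train), len(valid))  # 8 2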
KiryanovKD/models | [
"e17080247e3c9b3301680f61b8f4815c22509e7e",
"e17080247e3c9b3301680f61b8f4815c22509e7e",
"e17080247e3c9b3301680f61b8f4815c22509e7e",
"e17080247e3c9b3301680f61b8f4815c22509e7e",
"e17080247e3c9b3301680f61b8f4815c22509e7e",
"e17080247e3c9b3301680f61b8f4815c22509e7e",
"e17080247e3c9b3301680f61b8f4815c22509e7e",
"e17080247e3c9b3301680f61b8f4815c22509e7e"
] | [
"official/nlp/modeling/networks/albert_encoder.py",
"official/legacy/image_classification/mnist_test.py",
"official/legacy/detection/modeling/architecture/spinenet.py",
"official/nlp/transformer/transformer_main_test.py",
"official/legacy/detection/evaluation/coco_utils.py",
"official/vision/utils/object_detection/preprocessor.py",
"official/modeling/multitask/task_sampler.py",
"official/projects/yt8m/tasks/yt8m_task.py"
] | [
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"ALBERT (https://arxiv.org/abs/1810.04805) text encoder network.\"\"\"\n# pylint: disable=g-classes-have-attributes\nimport collections\nimport tensorflow as tf\n\nfrom official.modeling import activations\nfrom official.nlp.modeling import layers\n\n\[email protected]_keras_serializable(package='Text')\nclass AlbertEncoder(tf.keras.Model):\n \"\"\"ALBERT (https://arxiv.org/abs/1810.04805) text encoder network.\n\n This network implements the encoder described in the paper \"ALBERT: A Lite\n BERT for Self-supervised Learning of Language Representations\"\n (https://arxiv.org/abs/1909.11942).\n\n Compared with BERT (https://arxiv.org/abs/1810.04805), ALBERT refactorizes\n embedding parameters into two smaller matrices and shares parameters\n across layers.\n\n The default values for this object are taken from the ALBERT-Base\n implementation described in the paper.\n\n *Note* that the network is constructed by Keras Functional API.\n\n Args:\n vocab_size: The size of the token vocabulary.\n embedding_width: The width of the word embeddings. If the embedding width is\n not equal to hidden size, embedding parameters will be factorized into two\n matrices in the shape of `(vocab_size, embedding_width)` and\n `(embedding_width, hidden_size)`, where `embedding_width` is usually much\n smaller than `hidden_size`.\n hidden_size: The size of the transformer hidden layers.\n num_layers: The number of transformer layers.\n num_attention_heads: The number of attention heads for each transformer. The\n hidden size must be divisible by the number of attention heads.\n max_sequence_length: The maximum sequence length that this encoder can\n consume. 
If None, max_sequence_length uses the value from sequence length.\n This determines the variable shape for positional embeddings.\n type_vocab_size: The number of types that the 'type_ids' input can take.\n intermediate_size: The intermediate size for the transformer layers.\n activation: The activation to use for the transformer layers.\n dropout_rate: The dropout rate to use for the transformer layers.\n attention_dropout_rate: The dropout rate to use for the attention layers\n within the transformer layers.\n initializer: The initialzer to use for all weights in this encoder.\n dict_outputs: Whether to use a dictionary as the model outputs.\n \"\"\"\n\n def __init__(self,\n vocab_size,\n embedding_width=128,\n hidden_size=768,\n num_layers=12,\n num_attention_heads=12,\n max_sequence_length=512,\n type_vocab_size=16,\n intermediate_size=3072,\n activation=activations.gelu,\n dropout_rate=0.1,\n attention_dropout_rate=0.1,\n initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),\n dict_outputs=False,\n **kwargs):\n activation = tf.keras.activations.get(activation)\n initializer = tf.keras.initializers.get(initializer)\n\n word_ids = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_word_ids')\n mask = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_mask')\n type_ids = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_type_ids')\n\n if embedding_width is None:\n embedding_width = hidden_size\n embedding_layer = layers.OnDeviceEmbedding(\n vocab_size=vocab_size,\n embedding_width=embedding_width,\n initializer=initializer,\n name='word_embeddings')\n word_embeddings = embedding_layer(word_ids)\n\n # Always uses dynamic slicing for simplicity.\n position_embedding_layer = layers.PositionEmbedding(\n initializer=initializer,\n max_length=max_sequence_length,\n name='position_embedding')\n position_embeddings = position_embedding_layer(word_embeddings)\n\n type_embeddings = (\n layers.OnDeviceEmbedding(\n vocab_size=type_vocab_size,\n embedding_width=embedding_width,\n initializer=initializer,\n use_one_hot=True,\n name='type_embeddings')(type_ids))\n\n embeddings = tf.keras.layers.Add()(\n [word_embeddings, position_embeddings, type_embeddings])\n embeddings = (\n tf.keras.layers.LayerNormalization(\n name='embeddings/layer_norm',\n axis=-1,\n epsilon=1e-12,\n dtype=tf.float32)(embeddings))\n embeddings = (tf.keras.layers.Dropout(rate=dropout_rate)(embeddings))\n # We project the 'embedding' output to 'hidden_size' if it is not already\n # 'hidden_size'.\n if embedding_width != hidden_size:\n embeddings = tf.keras.layers.experimental.EinsumDense(\n '...x,xy->...y',\n output_shape=hidden_size,\n bias_axes='y',\n kernel_initializer=initializer,\n name='embedding_projection')(\n embeddings)\n\n data = embeddings\n attention_mask = layers.SelfAttentionMask()(data, mask)\n shared_layer = layers.TransformerEncoderBlock(\n num_attention_heads=num_attention_heads,\n inner_dim=intermediate_size,\n inner_activation=activation,\n output_dropout=dropout_rate,\n attention_dropout=attention_dropout_rate,\n kernel_initializer=initializer,\n name='transformer')\n encoder_outputs = []\n for _ in range(num_layers):\n data = shared_layer([data, attention_mask])\n encoder_outputs.append(data)\n\n # Applying a tf.slice op (through subscript notation) to a Keras tensor\n # like this will create a SliceOpLambda layer. 
This is better than a Lambda\n # layer with Python code, because that is fundamentally less portable.\n first_token_tensor = data[:, 0, :]\n cls_output = tf.keras.layers.Dense(\n units=hidden_size,\n activation='tanh',\n kernel_initializer=initializer,\n name='pooler_transform')(\n first_token_tensor)\n if dict_outputs:\n outputs = dict(\n sequence_output=data,\n encoder_outputs=encoder_outputs,\n pooled_output=cls_output,\n )\n else:\n outputs = [data, cls_output]\n\n # b/164516224\n # Once we've created the network using the Functional API, we call\n # super().__init__ as though we were invoking the Functional API Model\n # constructor, resulting in this object having all the properties of a model\n # created using the Functional API. Once super().__init__ is called, we\n # can assign attributes to `self` - note that all `self` assignments are\n # below this line.\n super(AlbertEncoder, self).__init__(\n inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs)\n config_dict = {\n 'vocab_size': vocab_size,\n 'embedding_width': embedding_width,\n 'hidden_size': hidden_size,\n 'num_layers': num_layers,\n 'num_attention_heads': num_attention_heads,\n 'max_sequence_length': max_sequence_length,\n 'type_vocab_size': type_vocab_size,\n 'intermediate_size': intermediate_size,\n 'activation': tf.keras.activations.serialize(activation),\n 'dropout_rate': dropout_rate,\n 'attention_dropout_rate': attention_dropout_rate,\n 'initializer': tf.keras.initializers.serialize(initializer),\n }\n\n # We are storing the config dict as a namedtuple here to ensure checkpoint\n # compatibility with an earlier version of this model which did not track\n # the config dict attribute. TF does not track immutable attrs which\n # do not contain Trackables, so by creating a config namedtuple instead of\n # a dict we avoid tracking it.\n config_cls = collections.namedtuple('Config', config_dict.keys())\n self._config = config_cls(**config_dict)\n self._embedding_layer = embedding_layer\n self._position_embedding_layer = position_embedding_layer\n\n def get_embedding_table(self):\n return self._embedding_layer.embeddings\n\n def get_config(self):\n return dict(self._config._asdict())\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n",
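A minimal usage sketch for the AlbertEncoder above (assumed importable from this module); the vocabulary size, batch size, and sequence length below are illustrative values, not defaults from the paper.

import tensorflow as tf

# Hypothetical sizes, chosen only for this sketch.
VOCAB_SIZE, BATCH, SEQ_LEN = 30000, 2, 16

encoder = AlbertEncoder(
    vocab_size=VOCAB_SIZE,
    embedding_width=128,  # factorized: (30000, 128) + (128, 768) instead of (30000, 768)
    hidden_size=768,
    num_layers=12,
    dict_outputs=True)

word_ids = tf.random.uniform((BATCH, SEQ_LEN), maxval=VOCAB_SIZE, dtype=tf.int32)
mask = tf.ones((BATCH, SEQ_LEN), dtype=tf.int32)
type_ids = tf.zeros((BATCH, SEQ_LEN), dtype=tf.int32)

outputs = encoder([word_ids, mask, type_ids])
print(outputs['sequence_output'].shape)  # (2, 16, 768)
print(outputs['pooled_output'].shape)    # (2, 768)

Because a single TransformerEncoderBlock instance is reused for all num_layers iterations, the weight count stays roughly that of one layer regardless of depth, which is the parameter-sharing trick the class docstring describes.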
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test the Keras MNIST model on GPU.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import strategy_combinations\nfrom official.legacy.image_classification import mnist_main\nfrom official.utils.testing import integration\n\n\nmnist_main.define_mnist_flags()\n\n\ndef eager_strategy_combinations():\n return combinations.combine(\n distribution=[\n strategy_combinations.default_strategy,\n strategy_combinations.cloud_tpu_strategy,\n strategy_combinations.one_device_strategy_gpu,\n ],)\n\n\nclass KerasMnistTest(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Unit tests for sample Keras MNIST model.\"\"\"\n _tempdir = None\n\n @classmethod\n def setUpClass(cls): # pylint: disable=invalid-name\n super(KerasMnistTest, cls).setUpClass()\n\n def tearDown(self):\n super(KerasMnistTest, self).tearDown()\n tf.io.gfile.rmtree(self.get_temp_dir())\n\n @combinations.generate(eager_strategy_combinations())\n def test_end_to_end(self, distribution):\n \"\"\"Test Keras MNIST model with `strategy`.\"\"\"\n\n extra_flags = [\n \"-train_epochs\",\n \"1\",\n # Let TFDS find the metadata folder automatically\n \"--data_dir=\"\n ]\n\n dummy_data = (\n tf.ones(shape=(10, 28, 28, 1), dtype=tf.int32),\n tf.range(10),\n )\n datasets = (\n tf.data.Dataset.from_tensor_slices(dummy_data),\n tf.data.Dataset.from_tensor_slices(dummy_data),\n )\n\n run = functools.partial(\n mnist_main.run,\n datasets_override=datasets,\n strategy_override=distribution)\n\n integration.run_synthetic(\n main=run,\n synth=False,\n tmp_root=self.create_tempdir().full_path,\n extra_flags=extra_flags)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n# ==============================================================================\n\"\"\"Implementation of SpineNet model.\n\nX. Du, T-Y. Lin, P. Jin, G. Ghiasi, M. Tan, Y. Cui, Q. V. Le, X. Song\nSpineNet: Learning Scale-Permuted Backbone for Recognition and Localization\nhttps://arxiv.org/abs/1912.05027\n\"\"\"\nimport math\n\nfrom absl import logging\nimport tensorflow as tf\nfrom official.legacy.detection.modeling.architecture import nn_blocks\nfrom official.modeling import tf_utils\n\nlayers = tf.keras.layers\n\nFILTER_SIZE_MAP = {\n 1: 32,\n 2: 64,\n 3: 128,\n 4: 256,\n 5: 256,\n 6: 256,\n 7: 256,\n}\n\n# The fixed SpineNet architecture discovered by NAS.\n# Each element represents a specification of a building block:\n# (block_level, block_fn, (input_offset0, input_offset1), is_output).\nSPINENET_BLOCK_SPECS = [\n (2, 'bottleneck', (0, 1), False),\n (4, 'residual', (0, 1), False),\n (3, 'bottleneck', (2, 3), False),\n (4, 'bottleneck', (2, 4), False),\n (6, 'residual', (3, 5), False),\n (4, 'bottleneck', (3, 5), False),\n (5, 'residual', (6, 7), False),\n (7, 'residual', (6, 8), False),\n (5, 'bottleneck', (8, 9), False),\n (5, 'bottleneck', (8, 10), False),\n (4, 'bottleneck', (5, 10), True),\n (3, 'bottleneck', (4, 10), True),\n (5, 'bottleneck', (7, 12), True),\n (7, 'bottleneck', (5, 14), True),\n (6, 'bottleneck', (12, 14), True),\n]\n\nSCALING_MAP = {\n '49S': {\n 'endpoints_num_filters': 128,\n 'filter_size_scale': 0.65,\n 'resample_alpha': 0.5,\n 'block_repeats': 1,\n },\n '49': {\n 'endpoints_num_filters': 256,\n 'filter_size_scale': 1.0,\n 'resample_alpha': 0.5,\n 'block_repeats': 1,\n },\n '96': {\n 'endpoints_num_filters': 256,\n 'filter_size_scale': 1.0,\n 'resample_alpha': 0.5,\n 'block_repeats': 2,\n },\n '143': {\n 'endpoints_num_filters': 256,\n 'filter_size_scale': 1.0,\n 'resample_alpha': 1.0,\n 'block_repeats': 3,\n },\n '190': {\n 'endpoints_num_filters': 512,\n 'filter_size_scale': 1.3,\n 'resample_alpha': 1.0,\n 'block_repeats': 4,\n },\n}\n\n\nclass BlockSpec(object):\n \"\"\"A container class that specifies the block configuration for SpineNet.\"\"\"\n\n def __init__(self, level, block_fn, input_offsets, is_output):\n self.level = level\n self.block_fn = block_fn\n self.input_offsets = input_offsets\n self.is_output = is_output\n\n\ndef build_block_specs(block_specs=None):\n \"\"\"Builds the list of BlockSpec objects for SpineNet.\"\"\"\n if not block_specs:\n block_specs = SPINENET_BLOCK_SPECS\n logging.info('Building SpineNet block specs: %s', block_specs)\n return [BlockSpec(*b) for b in block_specs]\n\n\nclass SpineNet(tf.keras.Model):\n \"\"\"Class to build SpineNet models.\"\"\"\n\n def __init__(self,\n input_specs=tf.keras.layers.InputSpec(shape=[None, 640, 640, 3]),\n min_level=3,\n max_level=7,\n block_specs=build_block_specs(),\n endpoints_num_filters=256,\n resample_alpha=0.5,\n block_repeats=1,\n 
filter_size_scale=1.0,\n kernel_initializer='VarianceScaling',\n kernel_regularizer=None,\n bias_regularizer=None,\n activation='relu',\n use_sync_bn=False,\n norm_momentum=0.99,\n norm_epsilon=0.001,\n **kwargs):\n \"\"\"SpineNet model.\"\"\"\n self._min_level = min_level\n self._max_level = max_level\n self._block_specs = block_specs\n self._endpoints_num_filters = endpoints_num_filters\n self._resample_alpha = resample_alpha\n self._block_repeats = block_repeats\n self._filter_size_scale = filter_size_scale\n self._kernel_initializer = kernel_initializer\n self._kernel_regularizer = kernel_regularizer\n self._bias_regularizer = bias_regularizer\n self._use_sync_bn = use_sync_bn\n self._norm_momentum = norm_momentum\n self._norm_epsilon = norm_epsilon\n if activation == 'relu':\n self._activation = tf.nn.relu\n elif activation == 'swish':\n self._activation = tf.nn.swish\n else:\n raise ValueError('Activation {} not implemented.'.format(activation))\n self._init_block_fn = 'bottleneck'\n self._num_init_blocks = 2\n\n if use_sync_bn:\n self._norm = layers.experimental.SyncBatchNormalization\n else:\n self._norm = layers.BatchNormalization\n\n if tf.keras.backend.image_data_format() == 'channels_last':\n self._bn_axis = -1\n else:\n self._bn_axis = 1\n\n # Build SpineNet.\n inputs = tf.keras.Input(shape=input_specs.shape[1:])\n\n net = self._build_stem(inputs=inputs)\n net = self._build_scale_permuted_network(\n net=net, input_width=input_specs.shape[1])\n net = self._build_endpoints(net=net)\n\n super(SpineNet, self).__init__(inputs=inputs, outputs=net)\n\n def _block_group(self,\n inputs,\n filters,\n strides,\n block_fn_cand,\n block_repeats=1,\n name='block_group'):\n \"\"\"Creates one group of blocks for the SpineNet model.\"\"\"\n block_fn_candidates = {\n 'bottleneck': nn_blocks.BottleneckBlock,\n 'residual': nn_blocks.ResidualBlock,\n }\n block_fn = block_fn_candidates[block_fn_cand]\n _, _, _, num_filters = inputs.get_shape().as_list()\n\n if block_fn_cand == 'bottleneck':\n use_projection = not (num_filters == (filters * 4) and strides == 1)\n else:\n use_projection = not (num_filters == filters and strides == 1)\n\n x = block_fn(\n filters=filters,\n strides=strides,\n use_projection=use_projection,\n kernel_initializer=self._kernel_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer,\n activation=self._activation,\n use_sync_bn=self._use_sync_bn,\n norm_momentum=self._norm_momentum,\n norm_epsilon=self._norm_epsilon)(\n inputs)\n for _ in range(1, block_repeats):\n x = block_fn(\n filters=filters,\n strides=1,\n use_projection=False,\n kernel_initializer=self._kernel_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer,\n activation=self._activation,\n use_sync_bn=self._use_sync_bn,\n norm_momentum=self._norm_momentum,\n norm_epsilon=self._norm_epsilon)(\n x)\n return tf.identity(x, name=name)\n\n def _build_stem(self, inputs):\n \"\"\"Build SpineNet stem.\"\"\"\n x = layers.Conv2D(\n filters=64,\n kernel_size=7,\n strides=2,\n use_bias=False,\n padding='same',\n kernel_initializer=self._kernel_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer)(\n inputs)\n x = self._norm(\n axis=self._bn_axis,\n momentum=self._norm_momentum,\n epsilon=self._norm_epsilon)(\n x)\n x = tf_utils.get_activation(self._activation)(x)\n x = layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)\n\n net = []\n # Build the initial level 2 blocks.\n 
for i in range(self._num_init_blocks):\n x = self._block_group(\n inputs=x,\n filters=int(FILTER_SIZE_MAP[2] * self._filter_size_scale),\n strides=1,\n block_fn_cand=self._init_block_fn,\n block_repeats=self._block_repeats,\n name='stem_block_{}'.format(i + 1))\n net.append(x)\n return net\n\n def _build_scale_permuted_network(self,\n net,\n input_width,\n weighted_fusion=False):\n \"\"\"Build scale-permuted network.\"\"\"\n net_sizes = [int(math.ceil(input_width / 2**2))] * len(net)\n net_block_fns = [self._init_block_fn] * len(net)\n num_outgoing_connections = [0] * len(net)\n\n endpoints = {}\n for i, block_spec in enumerate(self._block_specs):\n # Find out specs for the target block.\n target_width = int(math.ceil(input_width / 2**block_spec.level))\n target_num_filters = int(FILTER_SIZE_MAP[block_spec.level] *\n self._filter_size_scale)\n target_block_fn = block_spec.block_fn\n\n # Resample then merge input0 and input1.\n parents = []\n input0 = block_spec.input_offsets[0]\n input1 = block_spec.input_offsets[1]\n\n x0 = self._resample_with_alpha(\n inputs=net[input0],\n input_width=net_sizes[input0],\n input_block_fn=net_block_fns[input0],\n target_width=target_width,\n target_num_filters=target_num_filters,\n target_block_fn=target_block_fn,\n alpha=self._resample_alpha)\n parents.append(x0)\n num_outgoing_connections[input0] += 1\n\n x1 = self._resample_with_alpha(\n inputs=net[input1],\n input_width=net_sizes[input1],\n input_block_fn=net_block_fns[input1],\n target_width=target_width,\n target_num_filters=target_num_filters,\n target_block_fn=target_block_fn,\n alpha=self._resample_alpha)\n parents.append(x1)\n num_outgoing_connections[input1] += 1\n\n # Merge 0 outdegree blocks to the output block.\n if block_spec.is_output:\n for j, (j_feat,\n j_connections) in enumerate(zip(net, num_outgoing_connections)):\n if j_connections == 0 and (j_feat.shape[2] == target_width and\n j_feat.shape[3] == x0.shape[3]):\n parents.append(j_feat)\n num_outgoing_connections[j] += 1\n\n # pylint: disable=g-direct-tensorflow-import\n if weighted_fusion:\n dtype = parents[0].dtype\n parent_weights = [\n tf.nn.relu(tf.cast(tf.Variable(1.0, name='block{}_fusion{}'.format(\n i, j)), dtype=dtype)) for j in range(len(parents))]\n weights_sum = tf.add_n(parent_weights)\n parents = [\n parents[i] * parent_weights[i] / (weights_sum + 0.0001)\n for i in range(len(parents))\n ]\n\n # Fuse all parent nodes then build a new block.\n x = tf_utils.get_activation(self._activation)(tf.add_n(parents))\n x = self._block_group(\n inputs=x,\n filters=target_num_filters,\n strides=1,\n block_fn_cand=target_block_fn,\n block_repeats=self._block_repeats,\n name='scale_permuted_block_{}'.format(i + 1))\n\n net.append(x)\n net_sizes.append(target_width)\n net_block_fns.append(target_block_fn)\n num_outgoing_connections.append(0)\n\n # Save output feats.\n if block_spec.is_output:\n if block_spec.level in endpoints:\n raise ValueError('Duplicate feats found for output level {}.'.format(\n block_spec.level))\n if (block_spec.level < self._min_level or\n block_spec.level > self._max_level):\n raise ValueError('Output level is out of range [{}, {}]'.format(\n self._min_level, self._max_level))\n endpoints[block_spec.level] = x\n\n return endpoints\n\n def _build_endpoints(self, net):\n \"\"\"Match filter size for endpoints before sharing conv layers.\"\"\"\n endpoints = {}\n for level in range(self._min_level, self._max_level + 1):\n x = layers.Conv2D(\n filters=self._endpoints_num_filters,\n kernel_size=1,\n strides=1,\n 
use_bias=False,\n kernel_initializer=self._kernel_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer)(\n net[level])\n x = self._norm(\n axis=self._bn_axis,\n momentum=self._norm_momentum,\n epsilon=self._norm_epsilon)(\n x)\n x = tf_utils.get_activation(self._activation)(x)\n endpoints[level] = x\n return endpoints\n\n def _resample_with_alpha(self,\n inputs,\n input_width,\n input_block_fn,\n target_width,\n target_num_filters,\n target_block_fn,\n alpha=0.5):\n \"\"\"Match resolution and feature dimension.\"\"\"\n _, _, _, input_num_filters = inputs.get_shape().as_list()\n if input_block_fn == 'bottleneck':\n input_num_filters /= 4\n new_num_filters = int(input_num_filters * alpha)\n\n x = layers.Conv2D(\n filters=new_num_filters,\n kernel_size=1,\n strides=1,\n use_bias=False,\n kernel_initializer=self._kernel_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer)(\n inputs)\n x = self._norm(\n axis=self._bn_axis,\n momentum=self._norm_momentum,\n epsilon=self._norm_epsilon)(\n x)\n x = tf_utils.get_activation(self._activation)(x)\n\n # Spatial resampling.\n if input_width > target_width:\n x = layers.Conv2D(\n filters=new_num_filters,\n kernel_size=3,\n strides=2,\n padding='SAME',\n use_bias=False,\n kernel_initializer=self._kernel_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer)(\n x)\n x = self._norm(\n axis=self._bn_axis,\n momentum=self._norm_momentum,\n epsilon=self._norm_epsilon)(\n x)\n x = tf_utils.get_activation(self._activation)(x)\n input_width /= 2\n while input_width > target_width:\n x = layers.MaxPool2D(pool_size=3, strides=2, padding='SAME')(x)\n input_width /= 2\n elif input_width < target_width:\n scale = target_width // input_width\n x = layers.UpSampling2D(size=(scale, scale))(x)\n\n # Last 1x1 conv to match filter size.\n if target_block_fn == 'bottleneck':\n target_num_filters *= 4\n x = layers.Conv2D(\n filters=target_num_filters,\n kernel_size=1,\n strides=1,\n use_bias=False,\n kernel_initializer=self._kernel_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer)(\n x)\n x = self._norm(\n axis=self._bn_axis,\n momentum=self._norm_momentum,\n epsilon=self._norm_epsilon)(\n x)\n\n return x\n\n\nclass SpineNetBuilder(object):\n \"\"\"SpineNet builder.\"\"\"\n\n def __init__(self,\n model_id,\n input_specs=tf.keras.layers.InputSpec(shape=[None, 640, 640, 3]),\n min_level=3,\n max_level=7,\n block_specs=build_block_specs(),\n kernel_initializer='VarianceScaling',\n kernel_regularizer=None,\n bias_regularizer=None,\n activation='relu',\n use_sync_bn=False,\n norm_momentum=0.99,\n norm_epsilon=0.001):\n if model_id not in SCALING_MAP:\n raise ValueError(\n 'SpineNet {} is not a valid architecture.'.format(model_id))\n scaling_params = SCALING_MAP[model_id]\n self._input_specs = input_specs\n self._min_level = min_level\n self._max_level = max_level\n self._block_specs = block_specs\n self._endpoints_num_filters = scaling_params['endpoints_num_filters']\n self._resample_alpha = scaling_params['resample_alpha']\n self._block_repeats = scaling_params['block_repeats']\n self._filter_size_scale = scaling_params['filter_size_scale']\n self._kernel_initializer = kernel_initializer\n self._kernel_regularizer = kernel_regularizer\n self._bias_regularizer = bias_regularizer\n self._activation = activation\n self._use_sync_bn = use_sync_bn\n self._norm_momentum = norm_momentum\n 
self._norm_epsilon = norm_epsilon\n\n def __call__(self, inputs, is_training=None):\n model = SpineNet(\n input_specs=self._input_specs,\n min_level=self._min_level,\n max_level=self._max_level,\n block_specs=self._block_specs,\n endpoints_num_filters=self._endpoints_num_filters,\n resample_alpha=self._resample_alpha,\n block_repeats=self._block_repeats,\n filter_size_scale=self._filter_size_scale,\n kernel_initializer=self._kernel_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer,\n activation=self._activation,\n use_sync_bn=self._use_sync_bn,\n norm_momentum=self._norm_momentum,\n norm_epsilon=self._norm_epsilon)\n return model(inputs)\n",
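A construction sketch for SpineNetBuilder above, assuming it is invoked in a Keras functional context: '49' selects the SpineNet-49 row of SCALING_MAP, and the 640x640 input matches the default InputSpec. Expected endpoint shapes follow from the level definition (level l is downsampled by a factor of 2**l).

import tensorflow as tf

builder = SpineNetBuilder(model_id='49')
images = tf.keras.Input(shape=(640, 640, 3))
endpoints = builder(images)  # dict keyed by level, for levels min_level..max_level = 3..7

for level in sorted(endpoints):
    # e.g. level 3 -> (None, 80, 80, 256), level 7 -> (None, 5, 5, 256)
    print(level, endpoints[level].shape)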
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test Transformer model.\"\"\"\n\nimport os\nimport re\nimport sys\nimport unittest\n\nfrom absl import flags\nfrom absl.testing import flagsaver\nimport tensorflow as tf\nfrom tensorflow.python.eager import context # pylint: disable=ungrouped-imports\nfrom official.nlp.transformer import misc\nfrom official.nlp.transformer import transformer_main\n\nFLAGS = flags.FLAGS\nFIXED_TIMESTAMP = 'my_time_stamp'\nWEIGHT_PATTERN = re.compile(r'weights-epoch-.+\\.hdf5')\n\n\ndef _generate_file(filepath, lines):\n with open(filepath, 'w') as f:\n for l in lines:\n f.write('{}\\n'.format(l))\n\n\nclass TransformerTaskTest(tf.test.TestCase):\n local_flags = None\n\n def setUp(self): # pylint: disable=g-missing-super-call\n temp_dir = self.get_temp_dir()\n if TransformerTaskTest.local_flags is None:\n misc.define_transformer_flags()\n # Loads flags, array cannot be blank.\n flags.FLAGS(['foo'])\n TransformerTaskTest.local_flags = flagsaver.save_flag_values()\n else:\n flagsaver.restore_flag_values(TransformerTaskTest.local_flags)\n FLAGS.model_dir = os.path.join(temp_dir, FIXED_TIMESTAMP)\n FLAGS.param_set = 'tiny'\n FLAGS.use_synthetic_data = True\n FLAGS.steps_between_evals = 1\n FLAGS.train_steps = 1\n FLAGS.validation_steps = 1\n FLAGS.batch_size = 4\n FLAGS.max_length = 1\n FLAGS.num_gpus = 1\n FLAGS.distribution_strategy = 'off'\n FLAGS.dtype = 'fp32'\n self.model_dir = FLAGS.model_dir\n self.temp_dir = temp_dir\n self.vocab_file = os.path.join(temp_dir, 'vocab')\n self.vocab_size = misc.get_model_params(FLAGS.param_set, 0)['vocab_size']\n self.bleu_source = os.path.join(temp_dir, 'bleu_source')\n self.bleu_ref = os.path.join(temp_dir, 'bleu_ref')\n self.orig_policy = (\n tf.compat.v2.keras.mixed_precision.global_policy())\n\n def tearDown(self): # pylint: disable=g-missing-super-call\n tf.compat.v2.keras.mixed_precision.set_global_policy(self.orig_policy)\n\n def _assert_exists(self, filepath):\n self.assertTrue(os.path.exists(filepath))\n\n def test_train_no_dist_strat(self):\n if context.num_gpus() >= 2:\n self.skipTest('No need to test 2+ GPUs without a distribution strategy.')\n t = transformer_main.TransformerTask(FLAGS)\n t.train()\n\n def test_train_save_full_model(self):\n if context.num_gpus() >= 2:\n self.skipTest('No need to test 2+ GPUs without a distribution strategy.')\n FLAGS.save_weights_only = False\n t = transformer_main.TransformerTask(FLAGS)\n t.train()\n\n def test_train_static_batch(self):\n if context.num_gpus() >= 2:\n self.skipTest('No need to test 2+ GPUs without a distribution strategy.')\n FLAGS.distribution_strategy = 'one_device'\n if tf.test.is_built_with_cuda():\n FLAGS.num_gpus = 1\n else:\n FLAGS.num_gpus = 0\n FLAGS.static_batch = True\n t = transformer_main.TransformerTask(FLAGS)\n t.train()\n\n @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')\n def test_train_1_gpu_with_dist_strat(self):\n FLAGS.distribution_strategy = 
'one_device'\n t = transformer_main.TransformerTask(FLAGS)\n t.train()\n\n @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')\n def test_train_fp16(self):\n FLAGS.distribution_strategy = 'one_device'\n FLAGS.dtype = 'fp16'\n t = transformer_main.TransformerTask(FLAGS)\n t.train()\n\n @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')\n def test_train_2_gpu(self):\n if context.num_gpus() < 2:\n self.skipTest(\n '{} GPUs are not available for this test. {} GPUs are available'\n .format(2, context.num_gpus()))\n FLAGS.distribution_strategy = 'mirrored'\n FLAGS.num_gpus = 2\n FLAGS.param_set = 'base'\n t = transformer_main.TransformerTask(FLAGS)\n t.train()\n\n @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')\n def test_train_2_gpu_fp16(self):\n if context.num_gpus() < 2:\n self.skipTest(\n '{} GPUs are not available for this test. {} GPUs are available'\n .format(2, context.num_gpus()))\n FLAGS.distribution_strategy = 'mirrored'\n FLAGS.num_gpus = 2\n FLAGS.param_set = 'base'\n FLAGS.dtype = 'fp16'\n t = transformer_main.TransformerTask(FLAGS)\n t.train()\n\n def _prepare_files_and_flags(self, *extra_flags):\n # Make log dir.\n if not os.path.exists(self.temp_dir):\n os.makedirs(self.temp_dir)\n\n # Fake vocab, bleu_source and bleu_ref.\n tokens = [\n \"'<pad>'\", \"'<EOS>'\", \"'_'\", \"'a'\", \"'b'\", \"'c'\", \"'d'\", \"'a_'\", \"'b_'\",\n \"'c_'\", \"'d_'\"\n ]\n tokens += [\"'{}'\".format(i) for i in range(self.vocab_size - len(tokens))]\n _generate_file(self.vocab_file, tokens)\n _generate_file(self.bleu_source, ['a b', 'c d'])\n _generate_file(self.bleu_ref, ['a b', 'd c'])\n\n # Update flags.\n update_flags = [\n 'ignored_program_name',\n '--vocab_file={}'.format(self.vocab_file),\n '--bleu_source={}'.format(self.bleu_source),\n '--bleu_ref={}'.format(self.bleu_ref),\n ]\n if extra_flags:\n update_flags.extend(extra_flags)\n FLAGS(update_flags)\n\n def test_predict(self):\n if context.num_gpus() >= 2:\n self.skipTest('No need to test 2+ GPUs without a distribution strategy.')\n self._prepare_files_and_flags()\n t = transformer_main.TransformerTask(FLAGS)\n t.predict()\n\n @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')\n def test_predict_fp16(self):\n if context.num_gpus() >= 2:\n self.skipTest('No need to test 2+ GPUs without a distribution strategy.')\n self._prepare_files_and_flags('--dtype=fp16')\n t = transformer_main.TransformerTask(FLAGS)\n t.predict()\n\n def test_eval(self):\n if context.num_gpus() >= 2:\n self.skipTest('No need to test 2+ GPUs without a distribution strategy.')\n if 'test_xla' in sys.argv[0]:\n self.skipTest('TODO(xla): Make this test faster under XLA.')\n self._prepare_files_and_flags()\n t = transformer_main.TransformerTask(FLAGS)\n t.eval()\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
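setUp above relies on absl's flagsaver to keep per-test flag mutations from leaking between tests. A minimal standalone sketch of that pattern follows; the flag here is defined purely for illustration.

from absl import flags
from absl.testing import flagsaver

flags.DEFINE_integer('example_train_steps', 10, 'Defined only for this sketch.')
FLAGS = flags.FLAGS
FLAGS(['prog'])  # parse flags once; the argv list cannot be empty

saved = flagsaver.save_flag_values()   # snapshot, as in setUp
FLAGS.example_train_steps = 1          # per-test mutation
flagsaver.restore_flag_values(saved)   # undo it before the next test runs
assert FLAGS.example_train_steps == 10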
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Util functions related to pycocotools and COCO eval.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport json\n\nfrom absl import logging\nimport numpy as np\nfrom PIL import Image\nfrom pycocotools import coco\nfrom pycocotools import mask as mask_api\nimport six\nimport tensorflow as tf\n\nfrom official.legacy.detection.dataloader import tf_example_decoder\nfrom official.legacy.detection.utils import box_utils\nfrom official.legacy.detection.utils import mask_utils\n\n\nclass COCOWrapper(coco.COCO):\n \"\"\"COCO wrapper class.\n\n This class wraps COCO API object, which provides the following additional\n functionalities:\n 1. Support string type image id.\n 2. Support loading the groundtruth dataset using the external annotation\n dictionary.\n 3. Support loading the prediction results using the external annotation\n dictionary.\n \"\"\"\n\n def __init__(self, eval_type='box', annotation_file=None, gt_dataset=None):\n \"\"\"Instantiates a COCO-style API object.\n\n Args:\n eval_type: either 'box' or 'mask'.\n annotation_file: a JSON file that stores annotations of the eval dataset.\n This is required if `gt_dataset` is not provided.\n gt_dataset: the groundtruth eval datatset in COCO API format.\n \"\"\"\n if ((annotation_file and gt_dataset) or\n ((not annotation_file) and (not gt_dataset))):\n raise ValueError('One and only one of `annotation_file` and `gt_dataset` '\n 'needs to be specified.')\n\n if eval_type not in ['box', 'mask']:\n raise ValueError('The `eval_type` can only be either `box` or `mask`.')\n\n coco.COCO.__init__(self, annotation_file=annotation_file)\n self._eval_type = eval_type\n if gt_dataset:\n self.dataset = gt_dataset\n self.createIndex()\n\n def loadRes(self, predictions):\n \"\"\"Loads result file and return a result api object.\n\n Args:\n predictions: a list of dictionary each representing an annotation in COCO\n format. 
The required fields are `image_id`, `category_id`, `score`,\n `bbox`, `segmentation`.\n\n Returns:\n res: result COCO api object.\n\n Raises:\n ValueError: if the set of image id from predctions is not the subset of\n the set of image id of the groundtruth dataset.\n \"\"\"\n res = coco.COCO()\n res.dataset['images'] = copy.deepcopy(self.dataset['images'])\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n\n image_ids = [ann['image_id'] for ann in predictions]\n if set(image_ids) != (set(image_ids) & set(self.getImgIds())):\n raise ValueError('Results do not correspond to the current dataset!')\n for ann in predictions:\n x1, x2, y1, y2 = [ann['bbox'][0], ann['bbox'][0] + ann['bbox'][2],\n ann['bbox'][1], ann['bbox'][1] + ann['bbox'][3]]\n if self._eval_type == 'box':\n ann['area'] = ann['bbox'][2] * ann['bbox'][3]\n ann['segmentation'] = [\n [x1, y1, x1, y2, x2, y2, x2, y1]]\n elif self._eval_type == 'mask':\n ann['area'] = mask_api.area(ann['segmentation'])\n\n res.dataset['annotations'] = copy.deepcopy(predictions)\n res.createIndex()\n return res\n\n\ndef convert_predictions_to_coco_annotations(predictions):\n \"\"\"Converts a batch of predictions to annotations in COCO format.\n\n Args:\n predictions: a dictionary of lists of numpy arrays including the following\n fields. K below denotes the maximum number of instances per image.\n Required fields:\n - source_id: a list of numpy arrays of int or string of shape\n [batch_size].\n - num_detections: a list of numpy arrays of int of shape [batch_size].\n - detection_boxes: a list of numpy arrays of float of shape\n [batch_size, K, 4], where coordinates are in the original image\n space (not the scaled image space).\n - detection_classes: a list of numpy arrays of int of shape\n [batch_size, K].\n - detection_scores: a list of numpy arrays of float of shape\n [batch_size, K].\n Optional fields:\n - detection_masks: a list of numpy arrays of float of shape\n [batch_size, K, mask_height, mask_width].\n\n Returns:\n coco_predictions: prediction in COCO annotation format.\n \"\"\"\n coco_predictions = []\n num_batches = len(predictions['source_id'])\n batch_size = predictions['source_id'][0].shape[0]\n max_num_detections = predictions['detection_classes'][0].shape[1]\n use_outer_box = 'detection_outer_boxes' in predictions\n for i in range(num_batches):\n predictions['detection_boxes'][i] = box_utils.yxyx_to_xywh(\n predictions['detection_boxes'][i])\n if use_outer_box:\n predictions['detection_outer_boxes'][i] = box_utils.yxyx_to_xywh(\n predictions['detection_outer_boxes'][i])\n mask_boxes = predictions['detection_outer_boxes']\n else:\n mask_boxes = predictions['detection_boxes']\n\n for j in range(batch_size):\n if 'detection_masks' in predictions:\n image_masks = mask_utils.paste_instance_masks(\n predictions['detection_masks'][i][j],\n mask_boxes[i][j],\n int(predictions['image_info'][i][j, 0, 0]),\n int(predictions['image_info'][i][j, 0, 1]))\n binary_masks = (image_masks > 0.0).astype(np.uint8)\n encoded_masks = [\n mask_api.encode(np.asfortranarray(binary_mask))\n for binary_mask in list(binary_masks)]\n for k in range(max_num_detections):\n ann = {}\n ann['image_id'] = predictions['source_id'][i][j]\n ann['category_id'] = predictions['detection_classes'][i][j, k]\n ann['bbox'] = predictions['detection_boxes'][i][j, k]\n ann['score'] = predictions['detection_scores'][i][j, k]\n if 'detection_masks' in predictions:\n ann['segmentation'] = encoded_masks[k]\n coco_predictions.append(ann)\n\n for i, ann in 
enumerate(coco_predictions):\n ann['id'] = i + 1\n\n return coco_predictions\n\n\ndef convert_groundtruths_to_coco_dataset(groundtruths, label_map=None):\n \"\"\"Converts groundtruths to the dataset in COCO format.\n\n Args:\n groundtruths: a dictionary of numpy arrays including the fields below.\n Note that each element in the list represent the number for a single\n example without batch dimension. K below denotes the actual number of\n instances for each image.\n Required fields:\n - source_id: a list of numpy arrays of int or string of shape\n [batch_size].\n - height: a list of numpy arrays of int of shape [batch_size].\n - width: a list of numpy arrays of int of shape [batch_size].\n - num_detections: a list of numpy arrays of int of shape [batch_size].\n - boxes: a list of numpy arrays of float of shape [batch_size, K, 4],\n where coordinates are in the original image space (not the\n normalized coordinates).\n - classes: a list of numpy arrays of int of shape [batch_size, K].\n Optional fields:\n - is_crowds: a list of numpy arrays of int of shape [batch_size, K]. If\n th field is absent, it is assumed that this instance is not crowd.\n - areas: a list of numy arrays of float of shape [batch_size, K]. If the\n field is absent, the area is calculated using either boxes or\n masks depending on which one is available.\n - masks: a list of numpy arrays of string of shape [batch_size, K],\n label_map: (optional) a dictionary that defines items from the category id\n to the category name. If `None`, collect the category mappping from the\n `groundtruths`.\n\n Returns:\n coco_groundtruths: the groundtruth dataset in COCO format.\n \"\"\"\n source_ids = np.concatenate(groundtruths['source_id'], axis=0)\n heights = np.concatenate(groundtruths['height'], axis=0)\n widths = np.concatenate(groundtruths['width'], axis=0)\n gt_images = [{'id': int(i), 'height': int(h), 'width': int(w)} for i, h, w\n in zip(source_ids, heights, widths)]\n\n gt_annotations = []\n num_batches = len(groundtruths['source_id'])\n batch_size = groundtruths['source_id'][0].shape[0]\n for i in range(num_batches):\n for j in range(batch_size):\n num_instances = groundtruths['num_detections'][i][j]\n for k in range(num_instances):\n ann = {}\n ann['image_id'] = int(groundtruths['source_id'][i][j])\n if 'is_crowds' in groundtruths:\n ann['iscrowd'] = int(groundtruths['is_crowds'][i][j, k])\n else:\n ann['iscrowd'] = 0\n ann['category_id'] = int(groundtruths['classes'][i][j, k])\n boxes = groundtruths['boxes'][i]\n ann['bbox'] = [\n float(boxes[j, k, 1]),\n float(boxes[j, k, 0]),\n float(boxes[j, k, 3] - boxes[j, k, 1]),\n float(boxes[j, k, 2] - boxes[j, k, 0])]\n if 'areas' in groundtruths:\n ann['area'] = float(groundtruths['areas'][i][j, k])\n else:\n ann['area'] = float(\n (boxes[j, k, 3] - boxes[j, k, 1]) *\n (boxes[j, k, 2] - boxes[j, k, 0]))\n if 'masks' in groundtruths:\n mask = Image.open(six.BytesIO(groundtruths['masks'][i][j, k]))\n width, height = mask.size\n np_mask = (\n np.array(mask.getdata()).reshape(height, width).astype(np.uint8))\n np_mask[np_mask > 0] = 255\n encoded_mask = mask_api.encode(np.asfortranarray(np_mask))\n ann['segmentation'] = encoded_mask\n if 'areas' not in groundtruths:\n ann['area'] = mask_api.area(encoded_mask)\n gt_annotations.append(ann)\n\n for i, ann in enumerate(gt_annotations):\n ann['id'] = i + 1\n\n if label_map:\n gt_categories = [{'id': i, 'name': label_map[i]} for i in label_map]\n else:\n category_ids = [gt['category_id'] for gt in gt_annotations]\n gt_categories = [{'id': 
i} for i in set(category_ids)]\n\n gt_dataset = {\n 'images': gt_images,\n 'categories': gt_categories,\n 'annotations': copy.deepcopy(gt_annotations),\n }\n return gt_dataset\n\n\nclass COCOGroundtruthGenerator(object):\n \"\"\"Generates the groundtruth annotations from a single example.\"\"\"\n\n def __init__(self, file_pattern, num_examples, include_mask):\n self._file_pattern = file_pattern\n self._num_examples = num_examples\n self._include_mask = include_mask\n self._dataset_fn = tf.data.TFRecordDataset\n\n def _parse_single_example(self, example):\n \"\"\"Parses a single serialized tf.Example proto.\n\n Args:\n example: a serialized tf.Example proto string.\n\n Returns:\n A dictionary of groundtruth with the following fields:\n source_id: a scalar tensor of int64 representing the image source_id.\n height: a scalar tensor of int64 representing the image height.\n width: a scalar tensor of int64 representing the image width.\n boxes: a float tensor of shape [K, 4], representing the groundtruth\n boxes in absolute coordinates with respect to the original image size.\n classes: a int64 tensor of shape [K], representing the class labels of\n each instances.\n is_crowds: a bool tensor of shape [K], indicating whether the instance\n is crowd.\n areas: a float tensor of shape [K], indicating the area of each\n instance.\n masks: a string tensor of shape [K], containing the bytes of the png\n mask of each instance.\n \"\"\"\n decoder = tf_example_decoder.TfExampleDecoder(\n include_mask=self._include_mask)\n decoded_tensors = decoder.decode(example)\n\n image = decoded_tensors['image']\n image_size = tf.shape(image)[0:2]\n boxes = box_utils.denormalize_boxes(\n decoded_tensors['groundtruth_boxes'], image_size)\n groundtruths = {\n 'source_id': tf.string_to_number(\n decoded_tensors['source_id'], out_type=tf.int64),\n 'height': decoded_tensors['height'],\n 'width': decoded_tensors['width'],\n 'num_detections': tf.shape(decoded_tensors['groundtruth_classes'])[0],\n 'boxes': boxes,\n 'classes': decoded_tensors['groundtruth_classes'],\n 'is_crowds': decoded_tensors['groundtruth_is_crowd'],\n 'areas': decoded_tensors['groundtruth_area'],\n }\n if self._include_mask:\n groundtruths.update({\n 'masks': decoded_tensors['groundtruth_instance_masks_png'],\n })\n return groundtruths\n\n def _build_pipeline(self):\n \"\"\"Builds data pipeline to generate groundtruth annotations.\"\"\"\n dataset = tf.data.Dataset.list_files(self._file_pattern, shuffle=False)\n dataset = dataset.apply(\n tf.data.experimental.parallel_interleave(\n lambda filename: self._dataset_fn(filename).prefetch(1),\n cycle_length=32,\n sloppy=False))\n dataset = dataset.map(self._parse_single_example, num_parallel_calls=64)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n dataset = dataset.batch(1, drop_remainder=False)\n return dataset\n\n def __call__(self):\n with tf.Graph().as_default():\n dataset = self._build_pipeline()\n groundtruth = dataset.make_one_shot_iterator().get_next()\n\n with tf.Session() as sess:\n for _ in range(self._num_examples):\n groundtruth_result = sess.run(groundtruth)\n yield groundtruth_result\n\n\ndef scan_and_generator_annotation_file(file_pattern,\n num_samples,\n include_mask,\n annotation_file):\n \"\"\"Scans and generate the COCO-style annotation JSON file given a dataset.\"\"\"\n groundtruth_generator = COCOGroundtruthGenerator(\n file_pattern, num_samples, include_mask)\n generate_annotation_file(groundtruth_generator, annotation_file)\n\n\ndef 
generate_annotation_file(groundtruth_generator,\n annotation_file):\n \"\"\"Generates COCO-style annotation JSON file given a groundtruth generator.\"\"\"\n groundtruths = {}\n logging.info('Loading groundtruth annotations from dataset to memory...')\n for groundtruth in groundtruth_generator():\n for k, v in six.iteritems(groundtruth):\n if k not in groundtruths:\n groundtruths[k] = [v]\n else:\n groundtruths[k].append(v)\n gt_dataset = convert_groundtruths_to_coco_dataset(groundtruths)\n\n logging.info('Saving groundtruth annotations to the JSON file...')\n with tf.io.gfile.GFile(annotation_file, 'w') as f:\n f.write(json.dumps(gt_dataset))\n logging.info('Done saving the JSON file...')\n",
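A sketch of how the helpers above are typically chained for box evaluation with pycocotools' COCOeval. The one-image toy dataset and prediction below are made up, but follow the field layout that convert_groundtruths_to_coco_dataset and convert_predictions_to_coco_annotations produce (note that each prediction carries an 'id' field before loadRes, which the converter assigns).

from pycocotools.cocoeval import COCOeval

gt_dataset = {
    'images': [{'id': 1, 'height': 100, 'width': 100}],
    'categories': [{'id': 1}],
    'annotations': [{
        'id': 1, 'image_id': 1, 'category_id': 1, 'iscrowd': 0,
        'bbox': [10.0, 10.0, 30.0, 30.0], 'area': 900.0,  # [x, y, w, h]
    }],
}
predictions = [{
    'id': 1, 'image_id': 1, 'category_id': 1, 'score': 0.9,
    'bbox': [12.0, 11.0, 28.0, 30.0],
}]

coco_gt = COCOWrapper(eval_type='box', gt_dataset=gt_dataset)
coco_dt = coco_gt.loadRes(predictions=predictions)
coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()  # prints the standard COCO AP/AR table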
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Preprocess images and bounding boxes for detection.\n\nWe perform two sets of operations in preprocessing stage:\n(a) operations that are applied to both training and testing data,\n(b) operations that are applied only to training data for the purpose of\n data augmentation.\n\nA preprocessing function receives a set of inputs,\ne.g. an image and bounding boxes,\nperforms an operation on them, and returns them.\nSome examples are: randomly cropping the image, randomly mirroring the image,\n randomly changing the brightness, contrast, hue and\n randomly jittering the bounding boxes.\n\nThe image is a rank 4 tensor: [1, height, width, channels] with\ndtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where\nin each row there is a box with [ymin xmin ymax xmax].\nBoxes are in normalized coordinates meaning\ntheir coordinate values range in [0, 1]\n\nImportant Note: In tensor_dict, images is a rank 4 tensor, but preprocessing\nfunctions receive a rank 3 tensor for processing the image. Thus, inside the\npreprocess function we squeeze the image to become a rank 3 tensor and then\nwe pass it to the functions. At the end of the preprocess we expand the image\nback to rank 4.\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom official.vision.utils.object_detection import box_list\n\n\ndef _flip_boxes_left_right(boxes):\n \"\"\"Left-right flip the boxes.\n\n Args:\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes\n are in normalized form meaning their coordinates vary between [0, 1]. Each\n row is in the form of [ymin, xmin, ymax, xmax].\n\n Returns:\n Flipped boxes.\n \"\"\"\n ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)\n flipped_xmin = tf.subtract(1.0, xmax)\n flipped_xmax = tf.subtract(1.0, xmin)\n flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)\n return flipped_boxes\n\n\ndef _flip_masks_left_right(masks):\n \"\"\"Left-right flip masks.\n\n Args:\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n representing instance masks.\n\n Returns:\n flipped masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n \"\"\"\n return masks[:, :, ::-1]\n\n\ndef keypoint_flip_horizontal(keypoints,\n flip_point,\n flip_permutation,\n scope=None):\n \"\"\"Flips the keypoints horizontally around the flip_point.\n\n This operation flips the x coordinate for each keypoint around the flip_point\n and also permutes the keypoints in a manner specified by flip_permutation.\n\n Args:\n keypoints: a tensor of shape [num_instances, num_keypoints, 2]\n flip_point: (float) scalar tensor representing the x coordinate to flip the\n keypoints around.\n flip_permutation: rank 1 int32 tensor containing the keypoint flip\n permutation. This specifies the mapping from original keypoint indices to\n the flipped keypoint indices. 
This is used primarily for keypoints that\n are not reflection invariant. E.g. Suppose there are 3 keypoints\n representing ['head', 'right_eye', 'left_eye'], then a logical choice for\n flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye'\n and 'right_eye' after a horizontal flip.\n scope: name scope.\n\n Returns:\n new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]\n \"\"\"\n if not scope:\n scope = 'FlipHorizontal'\n with tf.name_scope(scope):\n keypoints = tf.transpose(a=keypoints, perm=[1, 0, 2])\n keypoints = tf.gather(keypoints, flip_permutation)\n v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)\n u = flip_point * 2.0 - u\n new_keypoints = tf.concat([v, u], 2)\n new_keypoints = tf.transpose(a=new_keypoints, perm=[1, 0, 2])\n return new_keypoints\n\n\ndef keypoint_change_coordinate_frame(keypoints, window, scope=None):\n \"\"\"Changes coordinate frame of the keypoints to be relative to window's frame.\n\n Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint\n coordinates from keypoints of shape [num_instances, num_keypoints, 2]\n to be relative to this window.\n\n An example use case is data augmentation: where we are given groundtruth\n keypoints and would like to randomly crop the image to some window. In this\n case we need to change the coordinate frame of each groundtruth keypoint to be\n relative to this new window.\n\n Args:\n keypoints: a tensor of shape [num_instances, num_keypoints, 2]\n window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]\n window we should change the coordinate frame to.\n scope: name scope.\n\n Returns:\n new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]\n \"\"\"\n if not scope:\n scope = 'ChangeCoordinateFrame'\n with tf.name_scope(scope):\n win_height = window[2] - window[0]\n win_width = window[3] - window[1]\n new_keypoints = box_list_ops.scale(keypoints - [window[0], window[1]],\n 1.0 / win_height, 1.0 / win_width)\n return new_keypoints\n\n\ndef keypoint_prune_outside_window(keypoints, window, scope=None):\n \"\"\"Prunes keypoints that fall outside a given window.\n\n This function replaces keypoints that fall outside the given window with nan.\n See also clip_to_window which clips any keypoints that fall outside the given\n window.\n\n Args:\n keypoints: a tensor of shape [num_instances, num_keypoints, 2]\n window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]\n window outside of which the op should prune the keypoints.\n scope: name scope.\n\n Returns:\n new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]\n \"\"\"\n if not scope:\n scope = 'PruneOutsideWindow'\n with tf.name_scope(scope):\n y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)\n win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)\n\n valid_indices = tf.logical_and(\n tf.logical_and(y >= win_y_min, y <= win_y_max),\n tf.logical_and(x >= win_x_min, x <= win_x_max))\n\n new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y))\n new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x))\n new_keypoints = tf.concat([new_y, new_x], 2)\n\n return new_keypoints\n\n\ndef random_horizontal_flip(image,\n boxes=None,\n masks=None,\n keypoints=None,\n keypoint_flip_permutation=None,\n seed=None):\n \"\"\"Randomly flips the image and detections horizontally.\n\n The probability of flipping the image is 50%.\n\n Args:\n image: rank 3 float32 tensor with shape [height, width, channels].\n boxes: (optional) rank 2 float32 
tensor with shape [N, 4] containing the\n bounding boxes. Boxes are in normalized form meaning their coordinates\n vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax].\n masks: (optional) rank 3 float32 tensor with shape [num_instances, height,\n width] containing instance masks. The masks are of the same height, width\n as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape [num_instances,\n num_keypoints, 2]. The keypoints are in y-x normalized coordinates.\n keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip\n permutation.\n seed: random seed\n\n Returns:\n image: image which is the same shape as input image.\n\n If boxes, masks, keypoints, and keypoint_flip_permutation are not None,\n the function also returns the following tensors.\n\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n\n Raises:\n ValueError: if keypoints are provided but keypoint_flip_permutation is not.\n \"\"\"\n\n def _flip_image(image):\n # flip image\n image_flipped = tf.image.flip_left_right(image)\n return image_flipped\n\n if keypoints is not None and keypoint_flip_permutation is None:\n raise ValueError(\n 'keypoints are provided but keypoints_flip_permutation is not provided')\n\n with tf.name_scope('RandomHorizontalFlip'):\n result = []\n # random variable defining whether to do flip or not\n do_a_flip_random = tf.greater(tf.random.uniform([], seed=seed), 0.5)\n\n # flip image\n image = tf.cond(\n pred=do_a_flip_random,\n true_fn=lambda: _flip_image(image),\n false_fn=lambda: image)\n result.append(image)\n\n # flip boxes\n if boxes is not None:\n boxes = tf.cond(\n pred=do_a_flip_random,\n true_fn=lambda: _flip_boxes_left_right(boxes),\n false_fn=lambda: boxes)\n result.append(boxes)\n\n # flip masks\n if masks is not None:\n masks = tf.cond(\n pred=do_a_flip_random,\n true_fn=lambda: _flip_masks_left_right(masks),\n false_fn=lambda: masks)\n result.append(masks)\n\n # flip keypoints\n if keypoints is not None and keypoint_flip_permutation is not None:\n permutation = keypoint_flip_permutation\n keypoints = tf.cond(\n pred=do_a_flip_random,\n true_fn=lambda: keypoint_flip_horizontal(keypoints, 0.5, permutation),\n false_fn=lambda: keypoints)\n result.append(keypoints)\n\n return tuple(result)\n\n\ndef _compute_new_static_size(image, min_dimension, max_dimension):\n \"\"\"Compute new static shape for resize_to_range method.\"\"\"\n image_shape = image.get_shape().as_list()\n orig_height = image_shape[0]\n orig_width = image_shape[1]\n num_channels = image_shape[2]\n orig_min_dim = min(orig_height, orig_width)\n # Calculates the larger of the possible sizes\n large_scale_factor = min_dimension / float(orig_min_dim)\n # Scaling orig_(height|width) by large_scale_factor will make the smaller\n # dimension equal to min_dimension, save for floating point rounding errors.\n # For reasonably-sized images, taking the nearest integer will reliably\n # eliminate this error.\n large_height = int(round(orig_height * large_scale_factor))\n large_width = int(round(orig_width * large_scale_factor))\n large_size = [large_height, large_width]\n if max_dimension:\n # Calculates the smaller of the possible sizes, use that if the larger\n # is too big.\n orig_max_dim = 
max(orig_height, orig_width)\n small_scale_factor = max_dimension / float(orig_max_dim)\n # Scaling orig_(height|width) by small_scale_factor will make the larger\n # dimension equal to max_dimension, save for floating point rounding\n # errors. For reasonably-sized images, taking the nearest integer will\n # reliably eliminate this error.\n small_height = int(round(orig_height * small_scale_factor))\n small_width = int(round(orig_width * small_scale_factor))\n small_size = [small_height, small_width]\n new_size = large_size\n if max(large_size) > max_dimension:\n new_size = small_size\n else:\n new_size = large_size\n return tf.constant(new_size + [num_channels])\n\n\ndef _compute_new_dynamic_size(image, min_dimension, max_dimension):\n \"\"\"Compute new dynamic shape for resize_to_range method.\"\"\"\n image_shape = tf.shape(input=image)\n orig_height = tf.cast(image_shape[0], dtype=tf.float32)\n orig_width = tf.cast(image_shape[1], dtype=tf.float32)\n num_channels = image_shape[2]\n orig_min_dim = tf.minimum(orig_height, orig_width)\n # Calculates the larger of the possible sizes\n min_dimension = tf.constant(min_dimension, dtype=tf.float32)\n large_scale_factor = min_dimension / orig_min_dim\n # Scaling orig_(height|width) by large_scale_factor will make the smaller\n # dimension equal to min_dimension, save for floating point rounding errors.\n # For reasonably-sized images, taking the nearest integer will reliably\n # eliminate this error.\n large_height = tf.cast(\n tf.round(orig_height * large_scale_factor), dtype=tf.int32)\n large_width = tf.cast(\n tf.round(orig_width * large_scale_factor), dtype=tf.int32)\n large_size = tf.stack([large_height, large_width])\n if max_dimension:\n # Calculates the smaller of the possible sizes, use that if the larger\n # is too big.\n orig_max_dim = tf.maximum(orig_height, orig_width)\n max_dimension = tf.constant(max_dimension, dtype=tf.float32)\n small_scale_factor = max_dimension / orig_max_dim\n # Scaling orig_(height|width) by small_scale_factor will make the larger\n # dimension equal to max_dimension, save for floating point rounding\n # errors. For reasonably-sized images, taking the nearest integer will\n # reliably eliminate this error.\n small_height = tf.cast(\n tf.round(orig_height * small_scale_factor), dtype=tf.int32)\n small_width = tf.cast(\n tf.round(orig_width * small_scale_factor), dtype=tf.int32)\n small_size = tf.stack([small_height, small_width])\n new_size = tf.cond(\n pred=tf.cast(tf.reduce_max(input_tensor=large_size), dtype=tf.float32) >\n max_dimension,\n true_fn=lambda: small_size,\n false_fn=lambda: large_size)\n else:\n new_size = large_size\n return tf.stack(tf.unstack(new_size) + [num_channels])\n\n\ndef resize_to_range(image,\n masks=None,\n min_dimension=None,\n max_dimension=None,\n method=tf.image.ResizeMethod.BILINEAR,\n align_corners=False,\n pad_to_max_dimension=False):\n \"\"\"Resizes an image so its dimensions are within the provided value.\n\n The output size can be described by two cases:\n 1. If the image can be rescaled so its minimum dimension is equal to the\n provided value without the other dimension exceeding max_dimension,\n then do so.\n 2. 
Otherwise, resize so the largest dimension is equal to max_dimension.\n\n Args:\n image: A 3D tensor of shape [height, width, channels]\n masks: (optional) rank 3 float32 tensor with shape [num_instances, height,\n width] containing instance masks.\n min_dimension: (optional) (scalar) desired size of the smaller image\n dimension.\n max_dimension: (optional) (scalar) maximum allowed size of the larger image\n dimension.\n method: (optional) interpolation method used in resizing. Defaults to\n BILINEAR.\n align_corners: bool. If true, exactly align all 4 corners of the input and\n output. Defaults to False.\n pad_to_max_dimension: Whether to resize the image and pad it with zeros so\n the resulting image is of the spatial size [max_dimension, max_dimension].\n If masks are included they are padded similarly.\n\n Returns:\n Note that the position of the resized_image_shape changes based on whether\n masks are present.\n resized_image: A 3D tensor of shape [new_height, new_width, channels],\n where the image has been resized (with bilinear interpolation) so that\n min(new_height, new_width) == min_dimension or\n max(new_height, new_width) == max_dimension.\n resized_masks: If masks is not None, also outputs masks. A 3D tensor of\n shape [num_instances, new_height, new_width].\n resized_image_shape: A 1D tensor of shape [3] containing shape of the\n resized image.\n\n Raises:\n ValueError: if the image is not a 3D tensor.\n \"\"\"\n if len(image.get_shape()) != 3:\n raise ValueError('Image should be 3D tensor')\n\n with tf.name_scope('ResizeToRange'):\n if image.get_shape().is_fully_defined():\n new_size = _compute_new_static_size(image, min_dimension, max_dimension)\n else:\n new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension)\n new_image = tf.image.resize(image, new_size[:-1], method=method)\n\n if pad_to_max_dimension:\n new_image = tf.image.pad_to_bounding_box(new_image, 0, 0, max_dimension,\n max_dimension)\n\n result = [new_image]\n if masks is not None:\n new_masks = tf.expand_dims(masks, 3)\n new_masks = tf.image.resize(\n new_masks,\n new_size[:-1],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n new_masks = tf.squeeze(new_masks, 3)\n if pad_to_max_dimension:\n new_masks = tf.image.pad_to_bounding_box(new_masks, 0, 0, max_dimension,\n max_dimension)\n result.append(new_masks)\n\n result.append(new_size)\n return result\n\n\ndef _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):\n \"\"\"Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.\n\n Args:\n boxlist_to_copy_to: BoxList to which extra fields are copied.\n boxlist_to_copy_from: BoxList from which fields are copied.\n\n Returns:\n boxlist_to_copy_to with extra fields.\n \"\"\"\n for field in boxlist_to_copy_from.get_extra_fields():\n boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))\n return boxlist_to_copy_to\n\n\ndef box_list_scale(boxlist, y_scale, x_scale, scope=None):\n \"\"\"scale box coordinates in x and y dimensions.\n\n Args:\n boxlist: BoxList holding N boxes\n y_scale: (float) scalar tensor\n x_scale: (float) scalar tensor\n scope: name scope.\n\n Returns:\n boxlist: BoxList holding N boxes\n \"\"\"\n if not scope:\n scope = 'Scale'\n with tf.name_scope(scope):\n y_scale = tf.cast(y_scale, tf.float32)\n x_scale = tf.cast(x_scale, tf.float32)\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1)\n y_min = y_scale * y_min\n y_max = y_scale * y_max\n x_min = x_scale * x_min\n x_max = x_scale * x_max\n 
scaled_boxlist = box_list.BoxList(\n tf.concat([y_min, x_min, y_max, x_max], 1))\n return _copy_extra_fields(scaled_boxlist, boxlist)\n\n\ndef keypoint_scale(keypoints, y_scale, x_scale, scope=None):\n \"\"\"Scales keypoint coordinates in x and y dimensions.\n\n Args:\n keypoints: a tensor of shape [num_instances, num_keypoints, 2]\n y_scale: (float) scalar tensor\n x_scale: (float) scalar tensor\n scope: name scope.\n\n Returns:\n new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]\n \"\"\"\n if not scope:\n scope = 'Scale'\n with tf.name_scope(scope):\n y_scale = tf.cast(y_scale, tf.float32)\n x_scale = tf.cast(x_scale, tf.float32)\n new_keypoints = keypoints * [[[y_scale, x_scale]]]\n return new_keypoints\n\n\ndef scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):\n \"\"\"Scales boxes from normalized to pixel coordinates.\n\n Args:\n image: A 3D float32 tensor of shape [height, width, channels].\n boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding\n boxes in normalized coordinates. Each row is of the form [ymin, xmin,\n ymax, xmax].\n keypoints: (optional) rank 3 float32 tensor with shape [num_instances,\n num_keypoints, 2]. The keypoints are in y-x normalized coordinates.\n\n Returns:\n image: unchanged input image.\n scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the\n bounding boxes in pixel coordinates.\n scaled_keypoints: a 3D float32 tensor with shape\n [num_instances, num_keypoints, 2] containing the keypoints in pixel\n coordinates.\n \"\"\"\n boxlist = box_list.BoxList(boxes)\n image_height = tf.shape(input=image)[0]\n image_width = tf.shape(input=image)[1]\n scaled_boxes = box_list_scale(boxlist, image_height, image_width).get()\n result = [image, scaled_boxes]\n if keypoints is not None:\n scaled_keypoints = keypoint_scale(keypoints, image_height, image_width)\n result.append(scaled_keypoints)\n return tuple(result)\n",
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utils to sample tasks for interleaved optimization.\"\"\"\nimport abc\nfrom typing import Union, Dict, Text\nimport tensorflow as tf\n\nfrom official.modeling.multitask import configs\n\n\nclass TaskSampler(tf.Module, metaclass=abc.ABCMeta):\n \"\"\"An abstract class defining task sampling API for interleaving trainer.\"\"\"\n\n def __init__(self, task_weights: Dict[Text, Union[float, int]]):\n self._task_weights = task_weights\n\n @property\n def task_weights(self):\n return self._task_weights\n\n @abc.abstractmethod\n def task_cumulative_distribution(self, global_step: tf.Tensor) -> tf.Tensor:\n \"\"\"Compute cumulative distribution to sample tasks.\n\n It calculates the cumulative distribution of the multinomial task\n distribution with respect to which to be sampled against.\n\n Args:\n global_step: A tensor indicating current progess of training.\n\n Returns:\n A float tensor with shape (#(task), 1) that represents the cumulative\n sampling distribution.\n \"\"\"\n pass\n\n\nclass UniformTaskSampler(TaskSampler):\n \"\"\"Sample all tasks uniformly.\"\"\"\n\n def __init__(self, task_weights: Dict[Text, Union[float, int]]):\n super(UniformTaskSampler, self).__init__(task_weights=task_weights)\n self._uniform_cumulative = tf.math.cumsum(\n tf.constant(\n [1.0 / len(self._task_weights)] * len(self._task_weights),\n dtype=tf.float32))\n\n def task_cumulative_distribution(self, global_step: tf.Tensor) -> tf.Tensor:\n del global_step\n return self._uniform_cumulative\n\n\nclass ProportionalTaskSampler(TaskSampler):\n \"\"\"Sample tasks proportional to task weights.\"\"\"\n\n def __init__(self,\n task_weights: Dict[Text, Union[float, int]],\n alpha: float = 1.0):\n super(ProportionalTaskSampler, self).__init__(task_weights=task_weights)\n self._alpha = tf.cast(alpha, dtype=tf.float32)\n task_weight_dict_ordered_list = tf.constant(\n [weight for _, weight in self._task_weights.items()], dtype=tf.float32)\n task_sizes = tf.math.pow(task_weight_dict_ordered_list, self._alpha)\n task_distribution = task_sizes / tf.reduce_sum(task_sizes)\n self._porportional_cumulative = tf.math.cumsum(task_distribution)\n\n def task_cumulative_distribution(self, global_step: tf.Tensor) -> tf.Tensor:\n del global_step\n return self._porportional_cumulative\n\n\nclass AnnealingTaskSampler(TaskSampler):\n \"\"\"Sample tasks according to task weights as well as training progress.\n\n See http://proceedings.mlr.press/v97/stickland19a/stickland19a.pdf\n \"\"\"\n\n def __init__(self,\n task_weights: Dict[Text, Union[float, int]],\n steps_per_epoch: int,\n total_steps: int):\n super(AnnealingTaskSampler, self).__init__(task_weights=task_weights)\n self._steps_per_epoch = tf.cast(steps_per_epoch, dtype=tf.float32)\n self._total_epochs = tf.cast(\n total_steps / self._steps_per_epoch, dtype=tf.float32)\n\n def task_cumulative_distribution(self, global_step: tf.Tensor) -> tf.Tensor:\n cur_epoch = 
tf.math.floor(\n tf.cast(global_step, dtype=tf.float32) / self._steps_per_epoch)\n alpha = 1.0 - 0.8 * (cur_epoch - 1) / (self._total_epochs - 1 + 1e-10)\n task_weight_dict_ordered_list = [\n weight for _, weight in self._task_weights.items()\n ]\n task_sizes = tf.math.pow(\n tf.constant(task_weight_dict_ordered_list, dtype=tf.float32),\n tf.cast(alpha, dtype=tf.float32))\n dynamic_task_distribution = task_sizes / tf.reduce_sum(task_sizes)\n return tf.math.cumsum(dynamic_task_distribution)\n\n\ndef get_task_sampler(config: configs.TaskSamplingConfig,\n task_weights: Dict[Text, float]) -> TaskSampler:\n \"\"\"Utils to create task sampler with configuration and task weights.\"\"\"\n oneof_config = config.get()\n if config.type == 'uniform':\n return UniformTaskSampler(task_weights=task_weights)\n elif config.type == 'proportional':\n return ProportionalTaskSampler(\n task_weights=task_weights, alpha=oneof_config.alpha)\n elif config.type == 'annealing':\n return AnnealingTaskSampler(\n task_weights=task_weights,\n steps_per_epoch=oneof_config.steps_per_epoch,\n total_steps=oneof_config.total_steps)\n else:\n raise RuntimeError('Task sampler type not supported')\n",
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Video classification task definition.\"\"\"\nfrom absl import logging\nimport tensorflow as tf\n\nfrom official.core import base_task\nfrom official.core import input_reader\nfrom official.core import task_factory\nfrom official.modeling import tf_utils\nfrom official.projects.yt8m.configs import yt8m as yt8m_cfg\nfrom official.projects.yt8m.dataloaders import yt8m_input\nfrom official.projects.yt8m.eval_utils import eval_util\nfrom official.projects.yt8m.modeling import yt8m_model_utils as utils\nfrom official.projects.yt8m.modeling.yt8m_model import DbofModel\n\n\n@task_factory.register_task_cls(yt8m_cfg.YT8MTask)\nclass YT8MTask(base_task.Task):\n \"\"\"A task for video classification.\"\"\"\n\n def build_model(self):\n \"\"\"Builds model for YT8M Task.\"\"\"\n train_cfg = self.task_config.train_data\n common_input_shape = [None, sum(train_cfg.feature_sizes)]\n\n # [batch_size x num_frames x num_features]\n input_specs = tf.keras.layers.InputSpec(shape=[None] + common_input_shape)\n logging.info('Build model input %r', common_input_shape)\n\n l2_weight_decay = self.task_config.losses.l2_weight_decay\n # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.\n # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)\n # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)\n l2_regularizer = (\n tf.keras.regularizers.l2(l2_weight_decay /\n 2.0) if l2_weight_decay else None)\n # Model configuration.\n model_config = self.task_config.model\n norm_activation_config = model_config.norm_activation\n model = DbofModel(\n params=model_config,\n input_specs=input_specs,\n num_frames=train_cfg.num_frames,\n num_classes=train_cfg.num_classes,\n activation=norm_activation_config.activation,\n use_sync_bn=norm_activation_config.use_sync_bn,\n norm_momentum=norm_activation_config.norm_momentum,\n norm_epsilon=norm_activation_config.norm_epsilon,\n kernel_regularizer=l2_regularizer)\n return model\n\n def build_inputs(self, params: yt8m_cfg.DataConfig, input_context=None):\n \"\"\"Builds input.\n\n Args:\n params: configuration for input data\n input_context: indicates information about the compute replicas and input\n pipelines\n\n Returns:\n dataset: dataset fetched from reader\n \"\"\"\n\n decoder = yt8m_input.Decoder(input_params=params)\n decoder_fn = decoder.decode\n parser = yt8m_input.Parser(input_params=params)\n parser_fn = parser.parse_fn(params.is_training)\n postprocess = yt8m_input.PostBatchProcessor(input_params=params)\n postprocess_fn = postprocess.post_fn\n transform_batch = yt8m_input.TransformBatcher(input_params=params)\n batch_fn = transform_batch.batch_fn\n\n reader = input_reader.InputReader(\n params,\n dataset_fn=tf.data.TFRecordDataset,\n decoder_fn=decoder_fn,\n parser_fn=parser_fn,\n postprocess_fn=postprocess_fn,\n transform_and_batch_fn=batch_fn)\n\n dataset = reader.read(input_context=input_context)\n\n return 
dataset\n\n def build_losses(self, labels, model_outputs, aux_losses=None):\n \"\"\"Sigmoid Cross Entropy.\n\n Args:\n labels: tensor containing truth labels.\n model_outputs: output logits of the classifier.\n aux_losses: tensor containing auxiliarly loss tensors, i.e. `losses` in\n keras.Model.\n\n Returns:\n Tensors: The total loss, model loss tensors.\n \"\"\"\n losses_config = self.task_config.losses\n model_loss = tf.keras.losses.binary_crossentropy(\n labels,\n model_outputs,\n from_logits=losses_config.from_logits,\n label_smoothing=losses_config.label_smoothing)\n\n model_loss = tf_utils.safe_mean(model_loss)\n total_loss = model_loss\n if aux_losses:\n total_loss += tf.add_n(aux_losses)\n\n return total_loss, model_loss\n\n def build_metrics(self, training=True):\n \"\"\"Gets streaming metrics for training/validation.\n\n metric: mAP/gAP\n top_k: A positive integer specifying how many predictions are considered\n per video.\n top_n: A positive Integer specifying the average precision at n, or None\n to use all provided data points.\n Args:\n training: bool value, true for training mode, false for eval/validation.\n\n Returns:\n list of strings that indicate metrics to be used\n \"\"\"\n metrics = []\n metric_names = ['total_loss', 'model_loss']\n for name in metric_names:\n metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))\n\n if not training: # Cannot run in train step.\n num_classes = self.task_config.validation_data.num_classes\n top_k = self.task_config.top_k\n top_n = self.task_config.top_n\n self.avg_prec_metric = eval_util.EvaluationMetrics(\n num_classes, top_k=top_k, top_n=top_n)\n\n return metrics\n\n def train_step(self, inputs, model, optimizer, metrics=None):\n \"\"\"Does forward and backward.\n\n Args:\n inputs: a dictionary of input tensors. 
output_dict = {\n \"video_ids\": batch_video_ids,\n \"video_matrix\": batch_video_matrix,\n \"labels\": batch_labels,\n \"num_frames\": batch_frames, }\n model: the model, forward pass definition.\n optimizer: the optimizer for this training step.\n metrics: a nested structure of metrics objects.\n\n Returns:\n a dictionary of logs.\n \"\"\"\n features, labels = inputs['video_matrix'], inputs['labels']\n num_frames = inputs['num_frames']\n\n # Normalize input features.\n feature_dim = len(features.shape) - 1\n features = tf.nn.l2_normalize(features, feature_dim)\n\n # sample random frames / random sequence\n num_frames = tf.cast(num_frames, tf.float32)\n sample_frames = self.task_config.train_data.num_frames\n if self.task_config.model.sample_random_frames:\n features = utils.sample_random_frames(features, num_frames, sample_frames)\n else:\n features = utils.sample_random_sequence(features, num_frames,\n sample_frames)\n\n num_replicas = tf.distribute.get_strategy().num_replicas_in_sync\n with tf.GradientTape() as tape:\n outputs = model(features, training=True)\n # Casting output layer as float32 is necessary when mixed_precision is\n # mixed_float16 or mixed_bfloat16 to ensure output is casted as float32.\n outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)\n\n # Computes per-replica loss\n loss, model_loss = self.build_losses(\n model_outputs=outputs, labels=labels, aux_losses=model.losses)\n # Scales loss as the default gradients allreduce performs sum inside the\n # optimizer.\n scaled_loss = loss / num_replicas\n\n # For mixed_precision policy, when LossScaleOptimizer is used, loss is\n # scaled for numerical stability.\n if isinstance(optimizer,\n tf.keras.mixed_precision.LossScaleOptimizer):\n scaled_loss = optimizer.get_scaled_loss(scaled_loss)\n\n tvars = model.trainable_variables\n grads = tape.gradient(scaled_loss, tvars)\n # Scales back gradient before apply_gradients when LossScaleOptimizer is\n # used.\n if isinstance(optimizer,\n tf.keras.mixed_precision.LossScaleOptimizer):\n grads = optimizer.get_unscaled_gradients(grads)\n\n # Apply gradient clipping.\n if self.task_config.gradient_clip_norm > 0:\n grads, _ = tf.clip_by_global_norm(grads,\n self.task_config.gradient_clip_norm)\n optimizer.apply_gradients(list(zip(grads, tvars)))\n\n logs = {self.loss: loss}\n\n all_losses = {'total_loss': loss, 'model_loss': model_loss}\n\n if metrics:\n for m in metrics:\n m.update_state(all_losses[m.name])\n logs.update({m.name: m.result()})\n\n return logs\n\n def validation_step(self, inputs, model, metrics=None):\n \"\"\"Validatation step.\n\n Args:\n inputs: a dictionary of input tensors. 
output_dict = {\n \"video_ids\": batch_video_ids,\n \"video_matrix\": batch_video_matrix,\n \"labels\": batch_labels,\n \"num_frames\": batch_frames, }\n model: the model, forward definition\n metrics: a nested structure of metrics objects.\n\n Returns:\n a dictionary of logs.\n \"\"\"\n features, labels = inputs['video_matrix'], inputs['labels']\n num_frames = inputs['num_frames']\n\n # Normalize input features.\n feature_dim = len(features.shape) - 1\n features = tf.nn.l2_normalize(features, feature_dim)\n\n # sample random frames (None, 5, 1152) -> (None, 30, 1152)\n sample_frames = self.task_config.validation_data.num_frames\n if self.task_config.model.sample_random_frames:\n features = utils.sample_random_frames(features, num_frames, sample_frames)\n else:\n features = utils.sample_random_sequence(features, num_frames,\n sample_frames)\n\n outputs = self.inference_step(features, model)\n outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)\n if self.task_config.validation_data.segment_labels:\n # workaround to ignore the unrated labels.\n outputs *= inputs['label_weights']\n # remove padding\n outputs = outputs[~tf.reduce_all(labels == -1, axis=1)]\n labels = labels[~tf.reduce_all(labels == -1, axis=1)]\n loss, model_loss = self.build_losses(\n model_outputs=outputs, labels=labels, aux_losses=model.losses)\n\n logs = {self.loss: loss}\n\n all_losses = {'total_loss': loss, 'model_loss': model_loss}\n\n logs.update({self.avg_prec_metric.name: (labels, outputs)})\n\n if metrics:\n for m in metrics:\n m.update_state(all_losses[m.name])\n logs.update({m.name: m.result()})\n return logs\n\n def inference_step(self, inputs, model):\n \"\"\"Performs the forward step.\"\"\"\n return model(inputs, training=False)\n\n def aggregate_logs(self, state=None, step_logs=None):\n if state is None:\n state = self.avg_prec_metric\n self.avg_prec_metric.accumulate(\n labels=step_logs[self.avg_prec_metric.name][0],\n predictions=step_logs[self.avg_prec_metric.name][1])\n return state\n\n def reduce_aggregated_logs(self, aggregated_logs, global_step=None):\n avg_prec_metrics = self.avg_prec_metric.get()\n self.avg_prec_metric.clear()\n return avg_prec_metrics\n"
] | [
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.activations.serialize",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.initializers.serialize",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.experimental.EinsumDense",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.keras.activations.get",
"tensorflow.keras.initializers.get",
"tensorflow.keras.layers.Input"
],
[
"tensorflow.range",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.python.distribute.combinations.combine",
"tensorflow.ones",
"tensorflow.test.main"
],
[
"tensorflow.keras.Input",
"tensorflow.keras.backend.image_data_format",
"tensorflow.identity",
"tensorflow.keras.layers.InputSpec",
"tensorflow.add_n"
],
[
"tensorflow.python.eager.context.num_gpus",
"tensorflow.test.is_built_with_cuda",
"tensorflow.test.main",
"tensorflow.compat.v2.keras.mixed_precision.set_global_policy",
"tensorflow.compat.v2.keras.mixed_precision.global_policy"
],
[
"tensorflow.Graph",
"tensorflow.shape",
"tensorflow.io.gfile.GFile",
"numpy.asfortranarray",
"numpy.concatenate",
"tensorflow.data.Dataset.list_files",
"tensorflow.Session",
"tensorflow.string_to_number"
],
[
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.minimum",
"tensorflow.image.pad_to_bounding_box",
"tensorflow.squeeze",
"tensorflow.subtract",
"tensorflow.gather",
"tensorflow.name_scope",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.random.uniform",
"tensorflow.split",
"tensorflow.round",
"tensorflow.reduce_max",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.maximum",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.image.flip_left_right",
"tensorflow.image.resize",
"tensorflow.logical_and"
],
[
"tensorflow.constant",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.math.pow",
"tensorflow.math.cumsum"
],
[
"tensorflow.nn.l2_normalize",
"tensorflow.keras.regularizers.l2",
"tensorflow.cast",
"tensorflow.reduce_all",
"tensorflow.keras.losses.binary_crossentropy",
"tensorflow.clip_by_global_norm",
"tensorflow.add_n",
"tensorflow.distribute.get_strategy",
"tensorflow.keras.layers.InputSpec",
"tensorflow.keras.metrics.Mean",
"tensorflow.GradientTape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.4",
"2.3",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
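The row above pairs a repository's source files with the fully qualified framework calls extracted from them (the bracketed API lists) and the library versions those calls plausibly resolve under; that reading of the fields is my inference, since the dump carries no schema. A minimal Python sketch, under that assumption, of checking whether such a dotted path resolves in an installed package:

import importlib

def api_exists(dotted: str) -> bool:
    # Return True if e.g. 'tensorflow.io.TFRecordWriter' resolves.
    parts = dotted.split('.')
    module = None
    # Import the longest importable module prefix...
    for i in range(len(parts), 0, -1):
        try:
            module = importlib.import_module('.'.join(parts[:i]))
            break
        except ImportError:
            continue
    if module is None:
        return False
    obj = module
    for name in parts[i:]:  # ...then walk the remaining attributes.
        try:
            obj = getattr(obj, name)
        except AttributeError:
            return False
    return True

print(api_exists('tensorflow.io.TFRecordWriter'))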
dataiku-research/paper_ial_2021 | [
"f860b6eb2d8471bc23e44d282e50c4deaf0813d9",
"f860b6eb2d8471bc23e44d282e50c4deaf0813d9"
] | [
"exp/experimenter.py",
"exp/run.py"
] | [
"import json\nfrom importlib import import_module\nimport pandas as pd\nfrom pandas.errors import EmptyDataError\nimport numpy as np\nimport pickle\nimport shutil\nimport time\nfrom pathlib import Path\nfrom collections import defaultdict\n\n\nclass CsvValue:\n\n def __init__(self, path):\n self.path = path\n try:\n self._data = pd.read_csv(self.path)\n # Make all columns not called value as index\n self._data.set_index(self._data.columns.drop('value').to_list(), inplace=True)\n except (FileNotFoundError, EmptyDataError):\n self._data = None\n\n def upsert(self, index, value):\n if self._data is None:\n self._data = pd.DataFrame([{**index, 'value': value}])\n self._data.set_index(self._data.columns.drop('value').to_list(), inplace=True)\n else:\n # Check that the index match\n diff = set(index.keys()).difference(set(self._data.index.names))\n if len(diff) != 0:\n raise ValueError('Index mismatch between DB and query: {}'.format(diff))\n \n # Now we just need to update the value if already there otherwise add it\n loc = tuple([index[k] for k in self._data.index.names])\n try:\n self._data.at[loc, 'value'] = value\n except KeyError:\n self._data = self._data.append(pd.DataFrame([[value]], columns=['value'], index=[loc]))\n self._data.to_csv(self.path)\n\n\nclass CsvDb:\n def __init__(self, folder):\n self.folder = Path(folder)\n self._values = dict()\n if not self.folder.exists():\n self.folder.mkdir()\n else:\n for f in self.folder.iterdir():\n if f.is_dir():\n continue\n self._values[f.stem] = CsvValue(str(f))\n \n def upsert(self, key, index, value):\n if not key in self._values:\n self._values[key] = CsvValue(str(self.folder / (key + '.csv')))\n self._values[key].upsert(index, value)\n\n\nclass Experiment():\n\n def __init__(self, db, seed, path='./cache', force=False, verbose=0):\n self.db = CsvDb(db)\n self.seed = seed\n self.path = Path(path) / str(seed)\n if not self.path.exists():\n self.path.mkdir(exist_ok=True, parents=True)\n self.verbose = verbose\n self._memory = defaultdict(dict)\n self.force = force\n \n def _log(self, verbosity, message):\n if self.verbose >= verbosity:\n print(message)\n\n def log_value(self, config, key, value):\n self.db.upsert(key, config, value)\n\n def _load(self, iter_id, name, tmp=False):\n if tmp:\n filebase = self.path / str(iter_id) / 'tmp' / name\n else:\n filebase = self.path / str(iter_id) / name \n if filebase.with_suffix('.npy').exists():\n value = np.load(filebase.with_suffix('.npy'))\n elif filebase.with_suffix('.krs').exists():\n from keras.models import load_model\n value = load_model(filebase.with_suffix('.krs'))\n elif filebase.with_suffix('.pkl').exists():\n with open(filebase.with_suffix('.pkl'), 'rb') as filedesc:\n value = pickle.load(filedesc)\n else:\n raise ValueError('Could not load variable {}.{}'.format(iter_id, name))\n self._memory[iter_id][name] = value\n return value\n\n def _save_value_at(self, iter_id, name, value, tmp=False):\n self._memory[iter_id][name] = value\n\n if tmp:\n filebase = self.path / str(iter_id) / 'tmp' / name\n else:\n filebase = self.path / str(iter_id) / name\n if type(value).__module__ == np.__name__:\n np.save(filebase.with_suffix('.npy'), value)\n elif 'keras' in value.__module__.split('.'):\n from keras.models import save_model\n save_model(value, filebase.with_suffix('.krs'))\n else:\n with open(filebase.with_suffix('.pkl'), 'wb') as f:\n pickle.dump(value, f)\n\n def retrieve_value_at(self, iter_id, name, first=None):\n self._log(2, 'Retrieving {} {}'.format(iter_id, name))\n if self.first:\n 
return first\n if iter_id in self._memory and name in self._memory[iter_id]:\n return self._memory[iter_id][name]\n return self._load(iter_id, name)\n\n def persist_value_at(self, iter_id, name, value):\n self._log(2, 'Persisting {} {}'.format(iter_id, name))\n self._memory[iter_id][name] = value\n self._save_value_at(iter_id, name, value)\n\n def resume_value_at(self, iter_id, name, first=None):\n self._log(2, 'Resuming {} {}'.format(iter_id, name))\n if self.first:\n return first\n if iter_id in self._memory and name in self._memory[iter_id]:\n return self._memory[iter_id][name]\n return self._load(iter_id, name, tmp=True)\n\n def cache_value_at(self, iter_id, name, value):\n self._log(2, 'Caching {} {}'.format(iter_id, name))\n\n self._memory[iter_id][name] = value\n self._save_value_at(iter_id, name, value, tmp=True)\n\n def iter(self, items, force_recompute=False):\n\n previous_iter_id = None\n self.first = True\n self._memory = defaultdict(dict)\n\n for current_iter_id in items:\n tmp_path = self.path / str(current_iter_id) / 'tmp'\n tmp_path.mkdir(exist_ok=True, parents=True)\n summary_path = self.path / str(current_iter_id) / 'completed.json'\n\n if summary_path.exists() and not self.force:\n self._log(1, 'Iteration {} already computed.'.format(current_iter_id))\n self.first = False\n continue\n\n t0 = time.time()\n yield current_iter_id\n delta = time.time() - t0\n\n with open(str(summary_path), 'w') as f:\n json.dump(dict(duration=delta), f)\n \n if previous_iter_id is not None:\n del self._memory[previous_iter_id]\n tmp_path = self.path / str(previous_iter_id) / 'tmp'\n if tmp_path.exists():\n shutil.rmtree(str(tmp_path))\n \n previous_iter_id = current_iter_id\n self.first = False\n\n tmp_path = self.path / str(previous_iter_id) / 'tmp'\n if tmp_path.exists():\n shutil.rmtree(str(tmp_path))\n del self._memory\n self._memory = defaultdict(dict)\n",
"import os\nimport sys\nimport itertools\nfrom copy import deepcopy\nfrom pathlib import Path\nimport importlib\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn.metrics import accuracy_score\n\nfrom cardinal.uncertainty import MarginSampler, ConfidenceSampler, _get_probability_classes\nfrom cardinal.random import RandomSampler\nfrom cardinal.clustering import MiniBatchKMeansSampler\nfrom cardinal.utils import ActiveLearningSplitter\n\nfrom samplers import TwoStepIncrementalMiniBatchKMeansSampler, TwoStepMiniBatchKMeansSampler, InformedConfidenceSampler\nfrom experimenter import Experiment\n\n\nsamplers_to_compute = None\nif len(sys.argv) > 1:\n samplers_to_compute = sys.argv[1:]\n print('Will only compute samplers', samplers_to_compute)\n\n\ncwd = Path.cwd()\ncache_path = cwd / 'cache'\nexp_path = cwd / '..' / 'exp'\ndatabase_path = 'sqlite:///database.db'\n\ndataset_name = cwd.stem\n\n\nprint('Cache path is {}'.format(cache_path))\n\n# Load experiment configuration\nexp_module = importlib.import_module(dataset_name)\nexp_config = exp_module.get_config()\n\nstart_size = exp_config['start_size']\nbatches = exp_config['batches']\noracle_error = exp_config.get('oracle_error', None)\n\ndata = exp_module.get_dataset()\nX = data['X']\ny = data['y']\n\nget_clf = exp_module.get_clf\nfit_clf = exp_module.fit_clf\n\ny_ = y\nif len(y.shape) == 2:\n y_ = np.argmax(y, axis=1)\n\n\nif len(y.shape) == 2:\n n_classes = y.shape[1]\nelse:\n n_classes = len(np.unique(y))\n\n\nk_start = False\n\niters = [i.item() for i in np.cumsum([start_size] + batches)]\nbatches.append(batches[-1])\nassert(len(batches) == len(iters))\n\n\nmodel_cache = dict()\n\n\nfor seed, ds in itertools.product(['11', '22', '33', '44', '55'], ['A', 'B']):\n print(seed, ds)\n methods = {\n 'random': lambda params: RandomSampler(batch_size=params['batch_size'], random_state=int(seed)),\n 'margin': lambda params: MarginSampler(params['clf'], batch_size=params['batch_size'], assume_fitted=True),\n 'uncertainty': lambda params: ConfidenceSampler(params['clf'], batch_size=params['batch_size'], assume_fitted=True),\n 'wkmeans': lambda params: TwoStepMiniBatchKMeansSampler(n_classes, params['clf'], params['batch_size'], assume_fitted=True, n_init=1, random_state=int(seed)),\n 'iwkmeans': lambda params: TwoStepIncrementalMiniBatchKMeansSampler(n_classes, params['clf'], params['batch_size'], assume_fitted=True, n_init=1, random_state=int(seed)),\n 'iconfidence': lambda params: InformedConfidenceSampler(params['clf'], batch_size=params['batch_size'], assume_fitted=True),\n }\n if samplers_to_compute is None:\n samplers_to_compute = list(methods.keys())\n \n splitter = ActiveLearningSplitter(X.shape[0], test_size=.5, random_state=int(seed))\n index = np.arange(X.shape[0])\n\n precomputed_proba_path = Path('precomputed_proba') / (seed + ds)\n\n if not precomputed_proba_path.exists():\n precomputed_proba_path.mkdir(parents=True)\n clf = get_clf()\n fit_clf(clf, X[splitter.test], y[splitter.test], **exp_config.get('full_dataset_fit_params', {}))\n y_proba = _get_probability_classes(clf, X)\n\n max_confidence = confidence_score('precomputed', y_proba)\n np.save(str(precomputed_proba_path / 'max_confidence.npy'), max_confidence)\n np.save(str(precomputed_proba_path / 'proba.npy'), 1 - max_confidence)\n\n 
max_confidence = np.load(str(precomputed_proba_path / 'max_confidence.npy'))\n\n for name in samplers_to_compute:\n print(name)\n \n splitter = ActiveLearningSplitter(X.shape[0], test_size=.5, random_state=int(seed))\n if ds == 'B':\n # Exchange -1 and -2\n splitter._mask = -(splitter._mask + 2) - 1\n\n method = methods[name]\n exp = Experiment(database_path, seed + ds, path=os.path.join(cache_path, name))\n\n if not k_start:\n first_index, _ = train_test_split(np.arange(X[splitter.train].shape[0]), train_size=iters[0], random_state=int(seed), stratify=y[splitter.train])\n else:\n start_sampler = MiniBatchKMeansSampler(iters[0], random_state=int(seed))\n start_sampler.fit(X[splitter.train])\n first_index = start_sampler.select_samples(X[splitter.train])\n splitter.add_batch(first_index)\n \n for i in exp.iter(range(len(iters))):\n selected = exp.retrieve_value_at(i - 1, 'selected', first=splitter._mask)\n splitter._mask = selected\n\n classifier = exp.resume_value_at(i - 1, 'classifier', first=get_clf())\n fit_clf(classifier, X[splitter.selected], y[splitter.selected])\n exp.cache_value_at(i, 'classifier', classifier)\n\n predicted = exp.persist_value_at(i, 'predicted', classifier.predict_proba(X))\n \n params = dict(batch_size=batches[i], clf=classifier, iter=i + 1)\n sampler = method(params)\n sampler.fit(X[splitter.selected], y[splitter.selected])\n\n if name.startswith('iwkmeans'):\n new_selected_index = sampler.select_samples(X[splitter.non_selected], fixed_cluster_centers=X[splitter.selected])\n elif name.startswith('iconfidence'):\n new_selected_index = sampler.select_samples(X[splitter.non_selected], max_confidence[splitter.non_selected])\n else:\n new_selected_index = sampler.select_samples(X[splitter.non_selected])\n\n splitter.add_batch(new_selected_index)\n exp.persist_value_at(i, 'selected', splitter._mask)\n print(iters[i], splitter.selected.sum())\n\n \n for i in range(len(iters)):\n selected = exp.retrieve_value_at(i, 'selected')\n splitter._mask = selected\n splitter.current_iter = selected.max()\n assert(splitter.selected_at(i).sum() == iters[i])\n predicted = exp.retrieve_value_at(i, 'predicted')\n predicted_test = predicted[splitter.test] \n predicted_selected = predicted[splitter.selected_at(i)] \n \n config = dict(\n seed=seed + ds,\n method=name,\n n_iter=i,\n dataset=dataset_name\n )\n\n exp.log_value(config, 'accuracy', accuracy_score(y_[splitter.test], np.argmax(predicted_test, axis=1)))\n exp.log_value(config, 'selected_accuracy', accuracy_score(y_[splitter.selected_at(i)], np.argmax(predicted_selected, axis=1)))\n print('acc', accuracy_score(y_[splitter.test], np.argmax(predicted_test, axis=1)))\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
],
[
"numpy.arange",
"numpy.argmax",
"numpy.cumsum",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
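The experimenter.py file above persists metrics through a CSV-backed upsert (insert-or-update keyed on the non-'value' columns). It leans on DataFrame.append, which pandas deprecated in 1.4 and removed in 2.0; below is a minimal sketch of the same pattern on current pandas, with combine_first standing in for the append-or-overwrite step (file name and index keys are hypothetical, not from the repo):

import pandas as pd

def upsert_csv(path, index, value):
    # One row keyed by the index dict; an existing row with the same key is overwritten.
    keys = list(index)
    row = pd.DataFrame([{**index, 'value': value}]).set_index(keys)
    try:
        table = pd.read_csv(path).set_index(keys)
        table = row.combine_first(table)  # row wins where the keys collide
    except FileNotFoundError:
        table = row
    table.to_csv(path)

upsert_csv('metrics.csv', {'method': 'margin', 'n_iter': 3}, 0.91)
upsert_csv('metrics.csv', {'method': 'margin', 'n_iter': 3}, 0.93)  # updates in place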
seo-dev/cvml_project | [
"7c95ce22db6f31dc4624af9417edffde021b5351"
] | [
"segmentation/eval.py"
] | [
"import os\nfrom segmentation.cityscape_reader import CityscapesDemoDataset\nimport tensorflow as tf\nimport argparse\nimport numpy as np\nimport cv2\n\nfrom segmentation.labels import cityscapes_mask_colors\nfrom segmentation.model import DeeplabV3\n\nparser = argparse.ArgumentParser(description=\"Cityscapes\")\nparser.add_argument('--project_name', default=\"segmentation_cityscapes\")\nparser.add_argument('--identifier', default=\"deeplabv3_densenet121\")\nparser.add_argument('--data_dir', required=True, help=\"path data root\")\n\n\ndef label2rgb(label, img=None, alpha=0.5, cmap=None):\n label_rgb = cmap[label]\n if img is not None:\n label_rgb = alpha * label_rgb + (1 - alpha) * img\n label_rgb = label_rgb.astype(np.uint8)\n return label_rgb\n\n\[email protected]\ndef predict(model, inputs):\n logits = model(inputs, training=False)\n return logits\n\n\ndef val(model, dataset, save_dir):\n for i, (rgb, inputs, img_path) in enumerate(dataset):\n rgb = tf.squeeze(rgb).numpy()\n\n # Predict\n logits = predict(model, inputs)\n pred = tf.squeeze(tf.argmax(logits, -1)).numpy().astype(np.uint8)\n\n # Save Images\n pred_color = label2rgb(pred, img=rgb, cmap=cityscapes_mask_colors)\n mask_path = os.path.join(save_dir, f'{int(i):04d}.png')\n cv2.imwrite(mask_path, cv2.cvtColor(pred_color, cv2.COLOR_RGB2BGR))\n\n\n\ndef evaluate(args):\n project_dir = os.getcwd()\n output_dir = os.path.join(project_dir, 'results', args.identifier)\n save_dir = os.path.join(output_dir, 'demo')\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n model = DeeplabV3(input_shape=None)\n\n ckpt = tf.train.Checkpoint(step=tf.Variable(1), net=model)\n manager = tf.train.CheckpointManager(ckpt, output_dir, max_to_keep=3)\n ckpt.restore(manager.latest_checkpoint).expect_partial()\n if manager.latest_checkpoint:\n print(\"Restored from {}\".format(manager.latest_checkpoint))\n else:\n print(\"No weights to Restores.\")\n raise\n\n val_dataset = CityscapesDemoDataset(args.data_dir, sequence='stuttgart_02')\n val_dataset = val_dataset.load_tfdataset()\n\n val(model, val_dataset, save_dir)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n evaluate(args)\n"
] | [
[
"tensorflow.train.CheckpointManager",
"tensorflow.argmax",
"tensorflow.squeeze",
"tensorflow.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
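eval.py above restores weights with the tf.train.Checkpoint / CheckpointManager pair listed in its API row. A self-contained sketch of that restore pattern (the model and directory are placeholders, not taken from the repo):

import tensorflow as tf

net = tf.keras.Sequential([tf.keras.layers.Dense(1)])  # stand-in model
ckpt = tf.train.Checkpoint(step=tf.Variable(1), net=net)
manager = tf.train.CheckpointManager(ckpt, './ckpts', max_to_keep=3)

status = ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
    # expect_partial() silences warnings about checkpointed values
    # (e.g. optimizer slots) that this graph does not use.
    status.expect_partial()
    print('Restored from', manager.latest_checkpoint)
else:
    print('No checkpoint found; initializing from scratch.')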
berquist/eg | [
"4c368b12eaaffcf0af8032f10348cf8bc1c3957a"
] | [
"python/keras/masking.py"
] | [
"from pprint import pprint\n\nimport numpy as np\n\nfrom keras.models import Model\nfrom keras.layers import Activation, Dense, Input, Masking, TimeDistributed\n\n\nif __name__ == \"__main__\":\n\n inp = Input(shape=(3, 6))\n mask = Masking(mask_value=0.1)(inp)\n out = TimeDistributed(Dense(1, activation=\"linear\"))(mask)\n model = Model(inputs=inp, outputs=out)\n\n print(\"Architecture\")\n model.summary()\n\n model.set_weights(\n [\n np.array([[1.0], [1.0], [1.0], [1.0], [1.0], [1.0]], dtype=np.float32),\n np.array([0.0], dtype=np.float32),\n ]\n )\n\n print(\"Weights\")\n pprint(model.get_weights())\n\n data = np.array(\n [[[3, 1, 2, 2, 0.1, 0.1], [0, 0, 0, 0, 0, 0], [2, 1, 1, 2, 0.1, 0.1]]]\n )\n p = model.predict(data)\n print(p)\n\n # Masking only works when all features of a timestep are equal to the mask\n # value.\n #\n # From https://github.com/keras-team/keras/issues/3086#issuecomment-526057828\n data = np.array(\n [[[3, 1, 2, 2, 0.1, 0.1], [0, 0, 0, 0, 0, 0], [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]]\n )\n p = model.predict(data)\n print(p)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
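The masking.py example above hinges on the detail its comment states: Keras masks a timestep only when every feature equals mask_value. That can be confirmed directly with the layer's compute_mask, without building a model (tf.keras is used here; the original imports standalone keras, but the layer semantics match):

import numpy as np
import tensorflow as tf

data = np.array([[[0.1, 0.1], [0.1, 0.3], [0.5, 0.5]]])
mask = tf.keras.layers.Masking(mask_value=0.1).compute_mask(data)
print(mask.numpy())  # [[False  True  True]]: only the all-0.1 timestep is masked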
DmitryUlyanov/deeppy | [
"79cc7cb552f30bc70eeea9ee7ff4976b0899ea66"
] | [
"examples/siamese_mnist.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"\nSiamese networks\n================\n\n\"\"\"\n\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import offsetbox\nimport deeppy as dp\n\n# Fetch MNIST data\ndataset = dp.dataset.MNIST()\nx_train, y_train, x_test, y_test = dataset.data(flat=True, dp_dtypes=True)\n\n# Normalize pixel intensities\nscaler = dp.StandardScaler()\nx_train = scaler.fit_transform(x_train)\nx_test = scaler.transform(x_test)\n\n# Generate image pairs\nn_pairs = 100000\nx1 = np.empty((n_pairs, 28*28), dtype=dp.float_)\nx2 = np.empty_like(x1, dtype=dp.float_)\ny = np.empty(n_pairs, dtype=dp.int_)\nn_imgs = x_train.shape[0]\nn = 0\nwhile n < n_pairs:\n i = random.randint(0, n_imgs-1)\n j = random.randint(0, n_imgs-1)\n if i == j:\n continue\n x1[n, ...] = x_train[i]\n x2[n, ...] = x_train[j]\n if y_train[i] == y_train[j]:\n y[n] = 1\n else:\n y[n] = 0\n n += 1\n\n# Prepare network inputs\ntrain_input = dp.SupervisedSiameseInput(x1, x2, y, batch_size=128)\n\n# Setup network\nw_gain = 1.5\nw_decay = 1e-4\nnet = dp.SiameseNetwork(\n siamese_layers=[\n dp.FullyConnected(\n n_out=1024,\n weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),\n ),\n dp.ReLU(),\n dp.FullyConnected(\n n_out=1024,\n weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),\n ),\n dp.ReLU(),\n dp.FullyConnected(\n n_out=2,\n weights=dp.Parameter(dp.AutoFiller(w_gain)),\n ),\n ],\n loss=dp.ContrastiveLoss(margin=1.0),\n)\n\n# Train network\ntrainer = dp.StochasticGradientDescent(\n max_epochs=15,\n learn_rule=dp.RMSProp(learn_rate=0.01),\n)\ntrainer.train(net, train_input)\n\n# Plot 2D embedding\ntest_input = dp.Input(x_test)\nx_test = np.reshape(x_test, (-1,) + dataset.img_shape)\nfeat = net.features(test_input)\nfeat -= np.min(feat, 0)\nfeat /= np.max(feat, 0)\n\nplt.figure()\nax = plt.subplot(111)\nshown_images = np.array([[1., 1.]])\nfor i in range(feat.shape[0]):\n dist = np.sum((feat[i] - shown_images)**2, 1)\n if np.min(dist) < 6e-4:\n # don't show points that are too close\n continue\n shown_images = np.r_[shown_images, [feat[i]]]\n imagebox = offsetbox.AnnotationBbox(\n offsetbox.OffsetImage(x_test[i], zoom=0.6, cmap=plt.cm.gray_r),\n xy=feat[i], frameon=False\n )\n ax.add_artist(imagebox)\n\nplt.xticks([]), plt.yticks([])\nplt.title('Embedding from the last layer of the network')\n"
] | [
[
"matplotlib.offsetbox.OffsetImage",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.reshape",
"numpy.empty_like",
"numpy.max",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xticks",
"numpy.array",
"numpy.sum",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
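The siamese example above trains with dp.ContrastiveLoss(margin=1.0). deeppy's internals are not shown here, but the standard contrastive loss that name refers to (Hadsell et al., 2006) is small enough to state in NumPy; d is the pairwise embedding distance and y is 1 for similar pairs:

import numpy as np

def contrastive_loss(d, y, margin=1.0):
    # Similar pairs (y=1) are pulled together; dissimilar pairs (y=0)
    # are pushed apart until they clear the margin.
    return np.mean(y * d**2 + (1 - y) * np.maximum(margin - d, 0.0)**2)

d = np.array([0.2, 1.5])  # one similar pair, one dissimilar pair
y = np.array([1, 0])
print(contrastive_loss(d, y))  # 0.02: only the similar pair's small gap is penalized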
lucas-sancere/DRFNS | [
"a35e01d516e9b491c09eaca6701e7e0fe9e56880"
] | [
"src_RealData/Data/CreateTFRecords.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nfrom DataGenRandomT import DataGenRandomT\nfrom DataGenClass import DataGen3, DataGenMulti, DataGen3reduce\nimport numpy as np\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef CreateTFRecord(OUTNAME, PATH, CROP, SIZE,\n TRANSFORM_LIST, UNET, MEAN_FILE, \n SEED, TEST_PATIENT, N_EPOCH, TYPE = \"Normal\",\n SPLIT=\"train\"):\n \"\"\"\n Takes a DataGen object and creates an associated TFRecord file. \n We do not perform data augmentation on the fly but save the \n augmented images in the record. Most of the parameters here \n reference paramaters of the DataGen object. In particular, PATH,\n CROP, SIZE, TRANSFORM_LIST, UNET, SEED and TEST_PATIENT. \n OUTNAME is the name of the record.\n \"\"\"\n\n tfrecords_filename = OUTNAME\n writer = tf.io.TFRecordWriter(tfrecords_filename)\n\n \n if TYPE == \"Normal\":\n DG = DataGenRandomT(PATH, split=SPLIT, crop=CROP, size=SIZE,\n transforms=TRANSFORM_LIST, UNet=UNET, num=TEST_PATIENT,\n mean_file=MEAN_FILE, seed_=SEED)\n\n elif TYPE == \"3class\":\n DG = DataGen3(PATH, split=SPLIT, crop = CROP, size=SIZE, \n transforms=TRANSFORM_LIST, UNet=UNET, num=TEST_PATIENT,\n mean_file=MEAN_FILE, seed_=SEED)\n elif TYPE == \"ReducedClass\":\n DG = DataGen3reduce(PATH, split=SPLIT, crop = CROP, size=SIZE, \n transforms=TRANSFORM_LIST, UNet=UNET, num=TEST_PATIENT,\n mean_file=MEAN_FILE, seed_=SEED)\n elif TYPE == \"JUST_READ\":\n DG = DataGenMulti(PATH, split=SPLIT, crop = CROP, size=SIZE, \n transforms=TRANSFORM_LIST, UNet=UNET, num=TEST_PATIENT,\n mean_file=MEAN_FILE, seed_=SEED)\n\n DG.SetPatient(TEST_PATIENT)\n N_ITER_MAX = N_EPOCH * DG.length\n\n original_images = []\n key = DG.RandomKey(False)\n if not UNET:\n for _ in range(N_ITER_MAX):\n key = DG.NextKeyRandList(0)\n img, annotation = DG[key]\n # img = img.astype(np.uint8)\n annotation = annotation.astype(np.uint8)\n height = img.shape[0]\n width = img.shape[1]\n \n original_images.append((img, annotation))\n \n img_raw = img.tostring()\n annotation_raw = annotation.tostring()\n \n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(height),\n 'width': _int64_feature(width),\n 'image_raw': _bytes_feature(img_raw),\n 'mask_raw': _bytes_feature(annotation_raw)}))\n \n writer.write(example.SerializeToString())\n else:\n for _ in range(N_ITER_MAX):\n key = DG.NextKeyRandList(0)\n img, annotation = DG[key]\n # img = img.astype(np.uint8)\n annotation = annotation.astype(np.uint8)\n height_img = img.shape[0]\n width_img = img.shape[1]\n\n height_mask = annotation.shape[0]\n width_mask = annotation.shape[1]\n \n original_images.append((img, annotation))\n \n img_raw = img.tostring()\n annotation_raw = annotation.tostring()\n \n example = tf.train.Example(features=tf.train.Features(feature={\n 'height_img': _int64_feature(height_img),\n 'width_img': _int64_feature(width_img),\n 'height_mask': _int64_feature(height_mask),\n 'width_mask': _int64_feature(width_mask),\n 'image_raw': _bytes_feature(img_raw),\n 'mask_raw': _bytes_feature(annotation_raw)}))\n \n writer.write(example.SerializeToString())\n\n\n writer.close()\n"
] | [
[
"tensorflow.io.TFRecordWriter",
"tensorflow.train.BytesList",
"tensorflow.train.Int64List"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
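CreateTFRecord above writes int64 dimensions plus raw image/mask bytes. Reading those records back needs a parse spec that mirrors the writer; here is a sketch for the non-UNet layout (the file name is hypothetical, and since the writer leaves the image dtype open, only the uint8 mask is decoded):

import tensorflow as tf

feature_spec = {
    'height': tf.io.FixedLenFeature([], tf.int64),
    'width': tf.io.FixedLenFeature([], tf.int64),
    'image_raw': tf.io.FixedLenFeature([], tf.string),
    'mask_raw': tf.io.FixedLenFeature([], tf.string),
}

def parse(example_proto):
    f = tf.io.parse_single_example(example_proto, feature_spec)
    mask = tf.io.decode_raw(f['mask_raw'], tf.uint8)  # writer used .tostring()
    return tf.reshape(mask, [f['height'], f['width']])

dataset = tf.data.TFRecordDataset('out.tfrecord').map(parse)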
shreyasvj25/turicreate | [
"dd210c2563930881abd51fd69cb73007955b33fd",
"dd210c2563930881abd51fd69cb73007955b33fd",
"32e84ca16aef8d04aff3d49ae9984bd49326bffd"
] | [
"src/unity/python/turicreate/test/test_graph.py",
"src/external/xgboost/python-package/xgboost/training.py",
"src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright © 2017 Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can\n# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\nfrom __future__ import print_function as _\nfrom __future__ import division as _\nfrom __future__ import absolute_import as _\nfrom ..data_structures.sgraph import SGraph, Vertex, Edge, load_sgraph\nfrom ..data_structures.sframe import SFrame\nfrom . import util\n\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport unittest\nimport tempfile\nimport json\nimport os\n\nimport sys\nif sys.version_info.major > 2:\n unittest.TestCase.assertItemsEqual = unittest.TestCase.assertCountEqual\n\nclass GraphTests(unittest.TestCase):\n def setUp(self):\n self.vertices = pd.DataFrame({\n 'vid': ['1', '2', '3'],\n 'color': ['g', None, 'b'],\n 'vec': [[.1, .1, .1], [.1, .1, .1], [.1, .1, .1]]})\n self.edges = pd.DataFrame({\n 'src_id': ['1', '2', '3'],\n 'dst_id': ['2', '3', '4'],\n 'weight': [0., None, 1.]})\n\n def test_empty_graph(self):\n g = SGraph()\n self.assertEqual(g.summary(), {'num_vertices': 0, 'num_edges': 0})\n self.assertEqual(len(g.get_fields()), 3)\n self.assertTrue(g.get_vertices(format='sframe').shape, (0, 1))\n self.assertTrue(g.get_edges(format='sframe').shape, (0, 2))\n self.assertTrue(g.vertices.shape, (0, 1))\n self.assertTrue(g.edges.shape, (0, 2))\n self.assertTrue(len(g.get_vertices(format='list')) == 0)\n self.assertTrue(len(g.get_edges(format='list')) == 0)\n\n def test_graph_constructor(self):\n g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n g2 = SGraph(g.vertices, g.edges)\n g3 = SGraph(g.vertices, g.edges, src_field=\"__dst_id\", dst_field=\"__src_id\") #flip around src and dst\n assert_frame_equal(g.vertices.to_dataframe().sort_values('__id').reset_index(drop=True),\n g2.vertices.to_dataframe().sort_values('__id').reset_index(drop=True))\n assert_frame_equal(g.edges.to_dataframe().sort_values(['__src_id', '__dst_id']).reset_index(drop=True),\n g2.edges.to_dataframe().sort_values(['__src_id', '__dst_id']).reset_index(drop=True))\n self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges)))\n self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges), 'vid', '__src_id', '__dst_id'))\n self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges), vid_field=None, src_field='src_id', dst_field='dst_id'))\n\n def test_simple_graph(self):\n for input_type in [pd.DataFrame, SFrame, list]:\n g = SGraph()\n if input_type is list:\n vertices = [Vertex(x[1]['vid'], {'color': x[1]['color'], 'vec': x[1]['vec']}) for x in self.vertices.iterrows()]\n edges = [Edge(x[1]['src_id'], x[1]['dst_id'], {'weight': x[1]['weight']}) for x in self.edges.iterrows()]\n g = g.add_vertices(vertices)\n g = g.add_edges(edges)\n else:\n g = g.add_vertices(input_type(self.vertices), vid_field='vid')\n g = g.add_edges(input_type(self.edges), src_field='src_id', dst_field='dst_id')\n self.assertEqual(g.summary(), {'num_vertices': 4, 'num_edges': 3})\n self.assertItemsEqual(g.get_fields(), ['__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'])\n self.assertItemsEqual(g.get_vertices(format='dataframe').columns.values, ['color', 'vec'])\n self.assertItemsEqual(g.get_edges(format='dataframe').columns.values, ['__src_id', '__dst_id', 'weight'])\n 
self.assertTrue(g.get_edges(format='dataframe').shape, (3, 3))\n self.assertTrue(g.get_vertices(format='dataframe').shape, (4, 3))\n self.assertTrue(g.get_vertices(format='dataframe', fields={'color': 'g'}).shape, (1, 2))\n self.assertTrue(g.get_edges(format='dataframe', fields={'weight': 0.}).shape, (1, 3))\n\n self.assertItemsEqual(g.get_vertices(format='sframe').column_names(), ['__id', 'color', 'vec'])\n self.assertItemsEqual(g.get_edges(format='sframe').column_names(), ['__src_id', '__dst_id', 'weight'])\n self.assertTrue(g.get_edges(format='sframe').shape, (3, 3))\n self.assertTrue(g.get_vertices(format='sframe').shape, (4, 3))\n self.assertTrue(g.get_vertices(format='sframe', fields={'color': 'g'}).shape, (1, 2))\n self.assertTrue(g.get_edges(format='sframe', fields={'weight': 0.}).shape, (1, 3))\n\n vertices = g.get_vertices(format='list')\n edges = g.get_edges(format='list')\n self.assertEqual(len(vertices), 4)\n self.assertEqual(len(edges), 3)\n\n # get edges is lazy\n edges = g.get_edges()\n self.assertFalse(edges.__is_materialized__())\n\n def test_vertex_query(self):\n df = pd.DataFrame({'src': ['a', 'c', 'b', 'd', 'c', 'e', 'g', 'f'],\n 'dst': ['b', 'b', 'd', 'c', 'e', 'g', 'f', 'e']})\n g = SGraph().add_edges(df, src_field='src', dst_field='dst')\n\n # basic check\n g2 = g.get_neighborhood(ids=['b'], radius=1, full_subgraph=False)\n out = g2.get_edges(format='dataframe')\n out.sort_values(by=['__src_id', '__dst_id'], axis=0, inplace=True)\n out.index = range(len(out))\n\n correct = pd.DataFrame.from_records([('b', 'd'),\n ('a', 'b'),\n ('c', 'b')],\n columns=['__src_id', '__dst_id'])\n correct.sort_values(by=['__src_id', '__dst_id'], axis=0, inplace=True)\n correct.index = range(len(correct))\n assert_frame_equal(out, correct, check_dtype=False)\n\n # check larger radius, full subgraph, and multiple vertices\n g2 = g.get_neighborhood(ids=['a', 'g'], radius=2, full_subgraph=True)\n out = g2.get_edges(format='dataframe')\n out.sort_values(by=['__src_id', '__dst_id'], axis=0, inplace=True)\n out.index = range(len(out))\n\n correct = pd.DataFrame.from_records([('a', 'b'),\n ('b', 'd'),\n ('c', 'b'),\n ('c', 'e'),\n ('d', 'c'),\n ('e', 'g'),\n ('f', 'e'),\n ('g', 'f')],\n columns=['__src_id', '__dst_id'])\n correct.sort_values(by=['__src_id', '__dst_id'], axis=0, inplace=True)\n correct.index = range(len(correct))\n assert_frame_equal(out, correct, check_dtype=False)\n\n def test_select_query(self):\n g = SGraph()\n g = g.add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n g2 = g.select_fields([\"color\", \"weight\"])\n self.assertSequenceEqual((g2.get_fields()), ['__id', 'color', '__src_id', '__dst_id', 'weight'])\n g2 = g.select_fields([\"color\"])\n self.assertSequenceEqual((g2.get_fields()), ['__id', 'color', '__src_id', '__dst_id'])\n del g.edges['weight']\n del g.vertices['vec']\n g.vertices['color2'] = g.vertices['color']\n self.assertSequenceEqual((g.get_fields()), ['__id', 'color', 'color2', '__src_id', '__dst_id'])\n g2 = g.select_fields([])\n self.assertSequenceEqual((g2.get_fields()), ['__id', '__src_id', '__dst_id'])\n\n def test_select_query_with_same_vertex_edge_field(self):\n vertices = SFrame({'__id': range(10)})\n edges = SFrame({'__src_id': range(10), '__dst_id': range(1, 11)})\n g = SGraph(vertices, edges)\n g.vertices['weight'] = 0\n g.vertices['v'] = 0\n g.edges['weight'] = 0\n g.edges['e'] = 0\n self.assertItemsEqual(g.get_fields(), ['v', 'e', 'weight', 'weight', '__id', '__src_id', '__dst_id'])\n g2 = g.select_fields('weight')\n 
self.assertItemsEqual(g2.get_fields(), ['weight', 'weight', '__id', '__src_id', '__dst_id'])\n\n def test_save_load(self):\n g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n with util.TempDirectory() as f:\n g.save(f)\n g2 = load_sgraph(f, 'binary')\n self.assertEqual(g2.summary(), {'num_vertices': 4, 'num_edges': 3})\n self.assertItemsEqual(g2.get_fields(), {'__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'})\n\n with util.TempDirectory() as f:\n g.save(f, format='csv')\n vertices = SFrame.read_csv(f + \"/vertices.csv\")\n edges = SFrame.read_csv(f + \"/edges.csv\")\n g2 = SGraph().add_edges(edges, '__src_id', '__dst_id').add_vertices(vertices, '__id')\n self.assertEqual(g2.summary(), {'num_vertices': 4, 'num_edges': 3})\n self.assertItemsEqual(g2.get_fields(), {'__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'})\n\n temp_fn = None\n # The delete=False is for Windows sake\n with tempfile.NamedTemporaryFile(suffix='.json', delete=False) as f:\n temp_fn = f.name\n g.save(f.name)\n with open(f.name, 'r') as f2:\n data = f2.read()\n g2 = json.loads(data)\n self.assertTrue(\"vertices\" in g2)\n self.assertTrue(\"edges\" in g2)\n if os.path.exists(temp_fn):\n os.remove(temp_fn)\n\n def test_load_graph_from_text(self):\n toy_graph_snap = \"\"\"#some comment string\n #some more comment string\n 1\\t2\n 1\\t3\n 2\\t3\n 2\\t1\n 3\\t1\n 3\\t2\"\"\"\n\n toy_graph_tsv = \"\"\"1\\t2\n 1\\t3\n 2\\t3\n 2\\t1\n 3\\t1\n 3\\t2\"\"\"\n toy_graph_csv = \"\"\"1,2\n 1,3\n 2,3\n 2,1\n 3,1\n 3,2\"\"\"\n\n temp_fnames = []\n with tempfile.NamedTemporaryFile(mode=\"w\", delete=False) as fsnap, tempfile.NamedTemporaryFile(mode=\"w\", delete=False) as ftsv, tempfile.NamedTemporaryFile(mode=\"w\", delete=False) as fcsv:\n fsnap.write(toy_graph_snap)\n fsnap.file.flush()\n ftsv.write(toy_graph_tsv)\n ftsv.file.flush()\n fcsv.write(toy_graph_csv)\n fcsv.file.flush()\n for (fname, fmt) in zip([fsnap.name, ftsv.name, fcsv.name], ['snap', 'tsv', 'csv']):\n g = load_sgraph(fname, fmt)\n self.assertEqual(g.summary(), {'num_vertices': 3, 'num_edges': 6})\n temp_fnames.append(fname)\n\n for name in temp_fnames:\n if os.path.exists(name):\n os.remove(name)\n\n def test_robust_parse(self):\n df = pd.DataFrame({'int': [1, 2, 3],\n 'float': [1., 2., 3.],\n 'str': ['one', 'two', 'three'],\n 'nan': [np.nan, np.nan, np.nan],\n 'sparse_int': [1, 2, np.nan],\n 'sparse_float': [np.nan, 2., 3.],\n 'sparse_str': [None, 'two', None]\n })\n g = SGraph().add_vertices(df)\n self.assertItemsEqual(g.get_fields(), df.columns.tolist() + ['__id', '__src_id', '__dst_id'])\n\n df2 = g.get_vertices(format='dataframe')\n sf = g.get_vertices(format='sframe')\n for col in df.columns:\n # potential bug: df2 is missing the 'nan' column.\n if (col != 'nan'):\n self.assertItemsEqual(sorted(list(df2[col].dropna())), sorted(list(df[col].dropna())))\n self.assertItemsEqual(sorted(list(sf[col].dropna())), sorted(list(df[col].dropna())))\n\n def test_missing_value_vids(self):\n vertices = SFrame()\n vertices['vid'] = [1, 2, 3, None]\n edges = SFrame()\n edges['src'] = [1, 2, 3, None]\n edges['dst'] = [4, 4, 4, 4]\n self.assertRaises(RuntimeError, lambda : SGraph().add_vertices(vertices, 'vid').summary())\n self.assertRaises(RuntimeError, lambda : SGraph().add_edges(edges, 'src', 'dst').summary())\n self.assertRaises(RuntimeError, lambda : SGraph().add_edges(edges, 'dst', 'src').summary())\n\n def test_gframe(self):\n g = SGraph()\n v = g.vertices\n self.assertSequenceEqual(v.column_names(), ['__id'])\n e = 
g.edges\n self.assertSequenceEqual(e.column_names(), ['__src_id', '__dst_id'])\n\n # Test vertices and edge attributes cannot be modified\n def set_vertices_empty(g):\n g.vertices = SFrame()\n\n def set_edges_empty(g):\n g.edges = SFrame()\n\n def remove_vertices(g):\n del g.vertices\n\n def remove_edges(g):\n del g.edges\n\n def remove_edge_column(gf, name):\n del gf[name]\n\n self.assertRaises(AttributeError, lambda: remove_vertices(g))\n self.assertRaises(AttributeError, lambda: remove_edges(g))\n self.assertRaises(AttributeError, lambda: set_vertices_empty(g))\n self.assertRaises(AttributeError, lambda: set_edges_empty(g))\n\n # Test gframe operations has the same effect as its sframe+graph equivalent\n g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n v = g.vertices\n v['id_col'] = v['__id']\n e = g.edges\n e['src_id_col'] = e['__src_id']\n e['dst_id_col'] = e['__dst_id']\n g2 = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n new_vdata = g2.get_vertices()\n new_vdata['id_col'] = new_vdata['__id']\n new_edata = g2.get_edges()\n new_edata['src_id_col'] = new_edata['__src_id']\n new_edata['dst_id_col'] = new_edata['__dst_id']\n g2 = SGraph().add_vertices(new_vdata, '__id').add_edges(new_edata, '__src_id', '__dst_id')\n assert_frame_equal(g.get_vertices().to_dataframe().sort_values('__id').reset_index(drop=True),\n g2.get_vertices().to_dataframe().sort_values('__id').reset_index(drop=True))\n assert_frame_equal(g.get_edges().to_dataframe().sort_values(['__src_id', '__dst_id']).reset_index(drop=True),\n g2.get_edges().to_dataframe().sort_values(['__src_id', '__dst_id']).reset_index(drop=True))\n\n # check delete a column with exception, and edges is still in a valid state\n self.assertRaises(KeyError, lambda: remove_edge_column(g.edges, 'badcolumn'))\n g.edges.head()\n\n # test slicing\n assert_frame_equal(g.edges[:3].to_dataframe(), g.get_edges()[:3].to_dataframe())\n assert_frame_equal(g.vertices[:3].to_dataframe(), g.get_vertices()[:3].to_dataframe())\n\n # test add row number\n e_expected = g.get_edges().to_dataframe()\n v_expected = g.get_vertices().to_dataframe()\n e_expected['id'] = range(len(e_expected))\n v_expected['id'] = range(len(v_expected))\n\n def test_sframe_le_append_skip_row_bug_is_fixed(self):\n \"\"\"\n This test is actually for SFrame lazy evaluation.\n The reason it is here is because the repro can only be done in SGraph.\n\n The bug appears when the SFrame has lazy_append and when passing through\n the logical filter, skip_rows is not done correctly. So the edge_sframe\n is in a bad state when not materialized.\n\n This unit test stays here to ensure the bug is fixed until we can find\n a more clean repro.\n \"\"\"\n n = 12 # smallest n to repro the le_append bug\n\n # A graph with edge i -> i + 1\n g = SGraph().add_edges(SFrame({'src': range(n), 'dst': range(1, n + 1)}), 'src', 'dst')\n\n lazy_sf = g.get_edges()\n materialized_sf = g.get_edges()\n materialized_sf.__materialize__()\n assert_frame_equal(lazy_sf[lazy_sf['__dst_id'] == n].to_dataframe(), materialized_sf[materialized_sf['__dst_id'] == n].to_dataframe())\n",
"# coding: utf-8\n# pylint: disable=too-many-locals, too-many-arguments, invalid-name\n# pylint: disable=too-many-branches\n\"\"\"Training Library containing training routines.\"\"\"\nfrom __future__ import absolute_import\n\nimport sys\nimport re\nimport numpy as np\nfrom .core import Booster, STRING_TYPES\n\ndef train(params, dtrain, num_boost_round=10, evals=(), obj=None, feval=None,\n maximize=False, early_stopping_rounds=None, evals_result=None,\n verbose_eval=True, learning_rates=None, xgb_model=None):\n # pylint: disable=too-many-statements,too-many-branches, attribute-defined-outside-init\n \"\"\"Train a booster with given parameters.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : DMatrix\n Data to be trained.\n num_boost_round: int\n Number of boosting iterations.\n watchlist (evals): list of pairs (DMatrix, string)\n List of items to be evaluated during training, this allows user to watch\n performance on the validation set.\n obj : function\n Customized objective function.\n feval : function\n Customized evaluation function.\n maximize : bool\n Whether to maximize feval.\n early_stopping_rounds: int\n Activates early stopping. Validation error needs to decrease at least\n every <early_stopping_rounds> round(s) to continue training.\n Requires at least one item in evals.\n If there's more than one, will use the last.\n Returns the model from the last iteration (not the best one).\n If early stopping occurs, the model will have two additional fields:\n bst.best_score and bst.best_iteration.\n evals_result: dict\n This dictionary stores the evaluation results of all the items in watchlist.\n Example: with a watchlist containing [(dtest,'eval'), (dtrain,'train')] and\n and a paramater containing ('eval_metric', 'logloss')\n Returns: {'train': {'logloss': ['0.48253', '0.35953']},\n 'eval': {'logloss': ['0.480385', '0.357756']}}\n verbose_eval : bool\n If `verbose_eval` then the evaluation metric on the validation set, if\n given, is printed at each boosting stage.\n learning_rates: list or function\n Learning rate for each boosting round (yields learning rate decay).\n - list l: eta = l[boosting round]\n - function f: eta = f(boosting round, num_boost_round)\n xgb_model : file name of stored xgb model or 'Booster' instance\n Xgb model to be loaded before training (allows training continuation).\n\n Returns\n -------\n booster : a trained booster model\n \"\"\"\n evals = list(evals)\n ntrees = 0\n if xgb_model is not None:\n if not isinstance(xgb_model, STRING_TYPES):\n xgb_model = xgb_model.save_raw()\n bst = Booster(params, [dtrain] + [d[0] for d in evals], model_file=xgb_model)\n ntrees = len(bst.get_dump())\n else:\n bst = Booster(params, [dtrain] + [d[0] for d in evals])\n\n\n if evals_result is not None:\n if not isinstance(evals_result, dict):\n raise TypeError('evals_result has to be a dictionary')\n else:\n evals_name = [d[1] for d in evals]\n evals_result.clear()\n evals_result.update({key: {} for key in evals_name})\n\n if not early_stopping_rounds:\n for i in range(num_boost_round):\n bst.update(dtrain, i, obj)\n ntrees += 1\n if len(evals) != 0:\n bst_eval_set = bst.eval_set(evals, i, feval)\n if isinstance(bst_eval_set, STRING_TYPES):\n msg = bst_eval_set\n else:\n msg = bst_eval_set.decode()\n\n if verbose_eval:\n sys.stderr.write(msg + '\\n')\n if evals_result is not None:\n res = re.findall(\"([0-9a-zA-Z@]+[-]*):-?([0-9.]+).\", msg)\n for key in evals_name:\n evals_idx = evals_name.index(key)\n res_per_eval = len(res) // len(evals_name)\n for 
r in range(res_per_eval):\n res_item = res[(evals_idx*res_per_eval) + r]\n res_key = res_item[0]\n res_val = res_item[1]\n if res_key in evals_result[key]:\n evals_result[key][res_key].append(res_val)\n else:\n evals_result[key][res_key] = [res_val]\n bst.best_iteration = (ntrees - 1)\n return bst\n\n else:\n # early stopping\n if len(evals) < 1:\n raise ValueError('For early stopping you need at least one set in evals.')\n\n if verbose_eval:\n sys.stderr.write(\"Will train until {} error hasn't decreased in {} rounds.\\n\".format(\\\n evals[-1][1], early_stopping_rounds))\n\n # is params a list of tuples? are we using multiple eval metrics?\n if isinstance(params, list):\n if len(params) != len(dict(params).items()):\n raise ValueError('Check your params.'\\\n 'Early stopping works with single eval metric only.')\n params = dict(params)\n\n # either minimize loss or maximize AUC/MAP/NDCG\n maximize_score = False\n if 'eval_metric' in params:\n maximize_metrics = ('auc', 'map', 'ndcg')\n if any(params['eval_metric'].startswith(x) for x in maximize_metrics):\n maximize_score = True\n if feval is not None:\n maximize_score = maximize\n\n if maximize_score:\n best_score = 0.0\n else:\n best_score = float('inf')\n\n best_msg = ''\n best_score_i = ntrees\n\n if isinstance(learning_rates, list) and len(learning_rates) != num_boost_round:\n raise ValueError(\"Length of list 'learning_rates' has to equal 'num_boost_round'.\")\n\n for i in range(num_boost_round):\n if learning_rates is not None:\n if isinstance(learning_rates, list):\n bst.set_param({'eta': learning_rates[i]})\n else:\n bst.set_param({'eta': learning_rates(i, num_boost_round)})\n bst.update(dtrain, i, obj)\n ntrees += 1\n bst_eval_set = bst.eval_set(evals, i, feval)\n\n if isinstance(bst_eval_set, STRING_TYPES):\n msg = bst_eval_set\n else:\n msg = bst_eval_set.decode()\n\n if verbose_eval:\n sys.stderr.write(msg + '\\n')\n\n if evals_result is not None:\n res = re.findall(\"([0-9a-zA-Z@]+[-]*):-?([0-9.]+).\", msg)\n for key in evals_name:\n evals_idx = evals_name.index(key)\n res_per_eval = len(res) // len(evals_name)\n for r in range(res_per_eval):\n res_item = res[(evals_idx*res_per_eval) + r]\n res_key = res_item[0]\n res_val = res_item[1]\n if res_key in evals_result[key]:\n evals_result[key][res_key].append(res_val)\n else:\n evals_result[key][res_key] = [res_val]\n\n score = float(msg.rsplit(':', 1)[1])\n if (maximize_score and score > best_score) or \\\n (not maximize_score and score < best_score):\n best_score = score\n best_score_i = (ntrees - 1)\n best_msg = msg\n elif i - best_score_i >= early_stopping_rounds:\n sys.stderr.write(\"Stopping. 
Best iteration:\\n{}\\n\\n\".format(best_msg))\n bst.best_score = best_score\n bst.best_iteration = best_score_i\n break\n bst.best_score = best_score\n bst.best_iteration = best_score_i\n return bst\n\n\nclass CVPack(object):\n \"\"\"\"Auxiliary datastruct to hold one fold of CV.\"\"\"\n def __init__(self, dtrain, dtest, param):\n \"\"\"\"Initialize the CVPack\"\"\"\n self.dtrain = dtrain\n self.dtest = dtest\n self.watchlist = [(dtrain, 'train'), (dtest, 'test')]\n self.bst = Booster(param, [dtrain, dtest])\n\n def update(self, iteration, fobj):\n \"\"\"\"Update the boosters for one iteration\"\"\"\n self.bst.update(self.dtrain, iteration, fobj)\n\n def eval(self, iteration, feval):\n \"\"\"\"Evaluate the CVPack for one iteration.\"\"\"\n return self.bst.eval_set(self.watchlist, iteration, feval)\n\n\ndef mknfold(dall, nfold, param, seed, evals=(), fpreproc=None):\n \"\"\"\n Make an n-fold list of CVPack from random indices.\n \"\"\"\n evals = list(evals)\n np.random.seed(seed)\n randidx = np.random.permutation(dall.num_row())\n kstep = len(randidx) / nfold\n idset = [randidx[(i * kstep): min(len(randidx), (i + 1) * kstep)] for i in range(nfold)]\n ret = []\n for k in range(nfold):\n dtrain = dall.slice(np.concatenate([idset[i] for i in range(nfold) if k != i]))\n dtest = dall.slice(idset[k])\n # run preprocessing on the data set if needed\n if fpreproc is not None:\n dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())\n else:\n tparam = param\n plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals]\n ret.append(CVPack(dtrain, dtest, plst))\n return ret\n\n\ndef aggcv(rlist, show_stdv=True, show_progress=None, as_pandas=True):\n # pylint: disable=invalid-name\n \"\"\"\n Aggregate cross-validation results.\n \"\"\"\n cvmap = {}\n idx = rlist[0].split()[0]\n for line in rlist:\n arr = line.split()\n assert idx == arr[0]\n for it in arr[1:]:\n if not isinstance(it, STRING_TYPES):\n it = it.decode()\n k, v = it.split(':')\n if k not in cvmap:\n cvmap[k] = []\n cvmap[k].append(float(v))\n\n msg = idx\n\n if show_stdv:\n fmt = '\\tcv-{0}:{1}+{2}'\n else:\n fmt = '\\tcv-{0}:{1}'\n\n index = []\n results = []\n for k, v in sorted(cvmap.items(), key=lambda x: x[0]):\n v = np.array(v)\n if not isinstance(msg, STRING_TYPES):\n msg = msg.decode()\n mean, std = np.mean(v), np.std(v)\n msg += fmt.format(k, mean, std)\n\n index.extend([k + '-mean', k + '-std'])\n results.extend([mean, std])\n\n\n\n if as_pandas:\n try:\n import pandas as pd\n results = pd.Series(results, index=index)\n except ImportError:\n if show_progress is None:\n show_progress = True\n else:\n # if show_progress is default (None),\n # result will be np.ndarray as it can't hold column name\n if show_progress is None:\n show_progress = True\n\n if show_progress:\n sys.stderr.write(msg + '\\n')\n\n return results\n\n\ndef cv(params, dtrain, num_boost_round=10, nfold=3, metrics=(),\n obj=None, feval=None, fpreproc=None, as_pandas=True,\n show_progress=None, show_stdv=True, seed=0):\n # pylint: disable = invalid-name\n \"\"\"Cross-validation with given paramaters.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : DMatrix\n Data to be trained.\n num_boost_round : int\n Number of boosting iterations.\n nfold : int\n Number of folds in CV.\n metrics : list of strings\n Evaluation metrics to be watched in CV.\n obj : function\n Custom objective function.\n feval : function\n Custom evaluation function.\n fpreproc : function\n Preprocessing function that takes (dtrain, dtest, param) and 
returns\n transformed versions of those.\n as_pandas : bool, default True\n Return pd.DataFrame when pandas is installed.\n If False or pandas is not installed, return np.ndarray\n show_progress : bool or None, default None\n Whether to display the progress. If None, progress will be displayed\n when np.ndarray is returned.\n show_stdv : bool, default True\n Whether to display the standard deviation in progress.\n Results are not affected, and always contains std.\n seed : int\n Seed used to generate the folds (passed to numpy.random.seed).\n\n Returns\n -------\n evaluation history : list(string)\n \"\"\"\n results = []\n cvfolds = mknfold(dtrain, nfold, params, seed, metrics, fpreproc)\n for i in range(num_boost_round):\n for fold in cvfolds:\n fold.update(i, obj)\n res = aggcv([f.eval(i, feval) for f in cvfolds],\n show_stdv=show_stdv, show_progress=show_progress,\n as_pandas=as_pandas)\n results.append(res)\n\n if as_pandas:\n try:\n import pandas as pd\n results = pd.DataFrame(results)\n except ImportError:\n results = np.array(results)\n else:\n results = np.array(results)\n\n return results\n",
"# -*- coding: utf-8 -*-\n# Copyright © 2017 Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can\n# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\"\"\"\nClass definition and utilities for the activity classification toolkit.\n\"\"\"\nfrom __future__ import absolute_import as _\nfrom __future__ import print_function as _\nfrom __future__ import division as _\n\nimport numpy as _np\nimport time as _time\nimport six as _six\n\nfrom turicreate import SArray as _SArray, SFrame as _SFrame\nfrom turicreate import aggregate as _agg\n\nimport turicreate.toolkits._internal_utils as _tkutl\nfrom turicreate.toolkits import _coreml_utils\nimport turicreate.toolkits._feature_engineering._internal_utils as _fe_tkutl\nfrom turicreate.toolkits._main import ToolkitError as _ToolkitError\nfrom turicreate.toolkits import evaluation as _evaluation\nfrom .. import _mxnet_utils\n\nfrom turicreate.toolkits._model import CustomModel as _CustomModel\nfrom turicreate.toolkits._model import PythonProxy as _PythonProxy\n\nfrom .util import random_split_by_session as _random_split_by_session\n\n\ndef create(dataset, session_id, target, features=None, prediction_window=100,\n validation_set='auto', max_iterations=10, batch_size=32, verbose=True):\n \"\"\"\n Create an :class:`ActivityClassifier` model.\n\n Parameters\n ----------\n dataset : SFrame\n Input data which consists of `sessions` of data where each session is\n a sequence of data. The data must be in `stacked` format, grouped by\n session. Within each session, the data is assumed to be sorted\n temporally. Columns in `features` will be used to train a model that\n will make a prediction using labels in the `target` column.\n\n session_id : string\n Name of the column that contains a unique ID for each session.\n\n target : string\n Name of the column containing the target variable. The values in this\n column must be of string or integer type. Use `model.classes` to\n retrieve the order in which the classes are mapped.\n\n features : list[string], optional\n Name of the columns containing the input features that will be used\n for classification. If set to `None`, all columns except `session_id`\n and `target` will be used.\n\n prediction_window : int, optional\n Number of time units between predictions. For example, if your input\n data is sampled at 100Hz, and the `prediction_window` is set to 100,\n then this model will make a prediction every 1 second.\n\n validation_set : SFrame, optional\n A dataset for monitoring the model's generalization performance to\n prevent the model from overfitting to the training data.\n\n For each row of the progress table, accuracy is measured over the\n provided training dataset and the `validation_set`. The format of this\n SFrame must be the same as the training set.\n\n When set to 'auto', a validation set is automatically sampled from the\n training data (if the training data has > 100 sessions). If\n validation_set is set to None, then all the data will be used for\n training.\n\n max_iterations : int , optional\n Maximum number of iterations/epochs made over the data during the\n training phase.\n\n batch_size : int, optional\n Number of sequence chunks used per training step. 
Must be greater than\n the number of GPUs in use.\n\n verbose : bool, optional\n If True, print progress updates and model details.\n\n Returns\n -------\n out : ActivityClassifier\n A trained :class:`ActivityClassifier` model.\n\n Examples\n --------\n .. sourcecode:: python\n\n >>> import turicreate as tc\n\n # Training on dummy data\n >>> data = tc.SFrame({\n ... 'accelerometer_x': [0.1, 0.2, 0.3, 0.4, 0.5] * 10,\n ... 'accelerometer_y': [0.5, 0.4, 0.3, 0.2, 0.1] * 10,\n ... 'accelerometer_z': [0.01, 0.01, 0.02, 0.02, 0.01] * 10,\n ... 'session_id': [0, 0, 0] * 10 + [1, 1] * 10,\n ... 'activity': ['walk', 'run', 'run'] * 10 + ['swim', 'swim'] * 10\n ... })\n\n # Create an activity classifier\n >>> model = tc.activity_classifier.create(data,\n ... session_id='session_id', target='activity',\n ... features=['accelerometer_x', 'accelerometer_y', 'accelerometer_z'])\n\n # Make predictions (as probability vector, or class)\n >>> predictions = model.predict(data)\n >>> predictions = model.predict(data, output_type='probability_vector')\n\n # Get both predictions and classes together\n >>> predictions = model.classify(data)\n\n # Get topk predictions (instead of only top-1) if your labels have more\n # 2 classes\n >>> predictions = model.predict_topk(data, k = 3)\n\n # Evaluate the model\n >>> results = model.evaluate(data)\n\n See Also\n --------\n ActivityClassifier, util.random_split_by_session\n \"\"\"\n _tkutl._raise_error_if_not_sframe(dataset, \"dataset\")\n from ._mx_model_architecture import _net_params\n from ._sframe_sequence_iterator import SFrameSequenceIter as _SFrameSequenceIter\n from ._sframe_sequence_iterator import prep_data as _prep_data\n from ._mx_model_architecture import _define_model_mxnet, _fit_model_mxnet\n from ._mps_model_architecture import _define_model_mps, _fit_model_mps\n from .._mps_utils import (use_mps as _use_mps,\n mps_device_name as _mps_device_name,\n ac_weights_mps_to_mxnet as _ac_weights_mps_to_mxnet)\n\n\n if not isinstance(target, str):\n raise _ToolkitError('target must be of type str')\n if not isinstance(session_id, str):\n raise _ToolkitError('session_id must be of type str')\n _tkutl._raise_error_if_sframe_empty(dataset, 'dataset')\n _tkutl._numeric_param_check_range('prediction_window', prediction_window, 1, 400)\n _tkutl._numeric_param_check_range('max_iterations', max_iterations, 0, _six.MAXSIZE)\n\n if features is None:\n features = _fe_tkutl.get_column_names(dataset,\n interpret_as_excluded=True,\n column_names=[session_id, target])\n if not hasattr(features, '__iter__'):\n raise TypeError(\"Input 'features' must be a list.\")\n if not all([isinstance(x, str) for x in features]):\n raise TypeError(\"Invalid feature %s: Feature names must be of type str.\" % x)\n if len(features) == 0:\n raise TypeError(\"Input 'features' must contain at least one column name.\")\n\n start_time = _time.time()\n dataset = _tkutl._toolkits_select_columns(dataset, features + [session_id, target])\n _tkutl._raise_error_if_sarray_not_expected_dtype(dataset[target], target, [str, int])\n _tkutl._raise_error_if_sarray_not_expected_dtype(dataset[session_id], session_id, [str, int])\n\n if isinstance(validation_set, str) and validation_set == 'auto':\n dataset, validation_set = _random_split_by_session(dataset, session_id)\n\n # Encode the target column to numerical values\n use_target = target is not None\n dataset, target_map = _encode_target(dataset, target)\n\n predictions_in_chunk = 20\n chunked_data, num_sessions = _prep_data(dataset, features, session_id, 
prediction_window,\n predictions_in_chunk, target=target, verbose=verbose)\n\n # Decide whether to use MPS GPU, MXnet GPU or CPU\n num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=num_sessions)\n use_mps = _use_mps() and num_mxnet_gpus == 0\n\n if verbose:\n if use_mps:\n print('Using GPU to create model ({})'.format(_mps_device_name()))\n elif num_mxnet_gpus == 1:\n print('Using GPU to create model (CUDA)')\n elif num_mxnet_gpus > 1:\n print('Using {} GPUs to create model (CUDA)'.format(num_mxnet_gpus))\n else:\n print('Using CPU to create model')\n\n # Create data iterators\n user_provided_batch_size = batch_size\n batch_size = max(batch_size, num_mxnet_gpus, 1)\n use_mx_data_batch = not use_mps\n data_iter = _SFrameSequenceIter(chunked_data, len(features),\n prediction_window, predictions_in_chunk,\n batch_size, use_target=use_target, mx_output=use_mx_data_batch)\n\n if validation_set is not None:\n _tkutl._raise_error_if_not_sframe(validation_set, 'validation_set')\n _tkutl._raise_error_if_sframe_empty(validation_set, 'validation_set')\n validation_set = _tkutl._toolkits_select_columns(\n validation_set, features + [session_id, target])\n validation_set = validation_set.filter_by(list(target_map.keys()), target)\n validation_set, mapping = _encode_target(validation_set, target, target_map)\n chunked_validation_set, _ = _prep_data(validation_set, features, session_id, prediction_window,\n predictions_in_chunk, target=target, verbose=False)\n\n valid_iter = _SFrameSequenceIter(chunked_validation_set, len(features),\n prediction_window, predictions_in_chunk,\n batch_size, use_target=use_target, mx_output=use_mx_data_batch)\n else:\n valid_iter = None\n\n # Define model architecture\n context = _mxnet_utils.get_mxnet_context(max_devices=num_sessions)\n\n # Always create MXnet models, as the pred_model is later saved to the state\n # If MPS is used - the loss_model will be overwritten\n loss_model, pred_model = _define_model_mxnet(len(target_map), prediction_window,\n predictions_in_chunk, context)\n\n if use_mps:\n loss_model = _define_model_mps(batch_size, len(features), len(target_map),\n prediction_window, predictions_in_chunk, is_prediction_model=False)\n\n log = _fit_model_mps(loss_model, data_iter, valid_iter, max_iterations, verbose)\n\n else:\n # Train the model using Mxnet\n log = _fit_model_mxnet(loss_model, data_iter, valid_iter,\n max_iterations, num_mxnet_gpus, verbose)\n\n # Set up prediction model\n pred_model.bind(data_shapes=data_iter.provide_data, label_shapes=None,\n for_training=False)\n\n if use_mps:\n mps_params = loss_model.export()\n arg_params, aux_params = _ac_weights_mps_to_mxnet(mps_params, _net_params['lstm_h'])\n else:\n arg_params, aux_params = loss_model.get_params()\n\n pred_model.init_params(arg_params=arg_params, aux_params=aux_params)\n\n # Save the model\n state = {\n '_pred_model': pred_model,\n 'verbose': verbose,\n 'training_time': _time.time() - start_time,\n 'target': target,\n 'classes': sorted(target_map.keys()),\n 'features': features,\n 'session_id': session_id,\n 'prediction_window': prediction_window,\n 'max_iterations': max_iterations,\n 'num_examples': len(dataset),\n 'num_sessions': num_sessions,\n 'num_classes': len(target_map),\n 'num_features': len(features),\n 'training_accuracy': log['train_acc'],\n 'training_log_loss': log['train_loss'],\n '_target_id_map': target_map,\n '_id_target_map': {v: k for k, v in target_map.items()},\n '_predictions_in_chunk': predictions_in_chunk,\n '_recalibrated_batch_size': 
data_iter.batch_size,\n 'batch_size' : user_provided_batch_size\n }\n\n if validation_set is not None:\n state['valid_accuracy'] = log['valid_acc']\n state['valid_log_loss'] = log['valid_loss']\n\n model = ActivityClassifier(state)\n return model\n\n\ndef _encode_target(data, target, mapping=None):\n \"\"\" Encode targets to integers in [0, num_classes - 1] \"\"\"\n if mapping is None:\n mapping = {t: i for i, t in enumerate(sorted(data[target].unique()))}\n\n data[target] = data[target].apply(lambda t: mapping[t])\n return data, mapping\n\nclass ActivityClassifier(_CustomModel):\n \"\"\"\n A trained model that is ready to use for classification or export to\n CoreML.\n\n This model should not be constructed directly.\n \"\"\"\n\n _PYTHON_ACTIVITY_CLASSIFIER_VERSION = 2\n\n def __init__(self, state):\n self.__proxy__ = _PythonProxy(state)\n\n def _get_native_state(self):\n state = self.__proxy__.get_state()\n state['_pred_model'] = _mxnet_utils.get_mxnet_state(state['_pred_model'])\n return state\n\n @classmethod\n def _load_version(cls, state, version):\n from ._mx_model_architecture import _define_model_mxnet\n\n _tkutl._model_version_check(version, cls._PYTHON_ACTIVITY_CLASSIFIER_VERSION)\n\n data_seq_len = state['prediction_window'] * state['_predictions_in_chunk']\n\n context = _mxnet_utils.get_mxnet_context(max_devices=state['num_sessions'])\n _, _pred_model = _define_model_mxnet(len(state['_target_id_map']), state['prediction_window'],\n state['_predictions_in_chunk'], context)\n\n batch_size = state['batch_size']\n preds_in_chunk = state['_predictions_in_chunk']\n win = state['prediction_window'] * preds_in_chunk\n num_features = len(state['features'])\n data_shapes = [('data', (batch_size, win, num_features))]\n\n _pred_model.bind(data_shapes=data_shapes, label_shapes=None,\n for_training=False)\n arg_params = _mxnet_utils.params_from_dict(state['_pred_model']['arg_params'])\n aux_params = _mxnet_utils.params_from_dict(state['_pred_model']['aux_params'])\n _pred_model.init_params(arg_params=arg_params, aux_params=aux_params)\n state['_pred_model'] = _pred_model\n\n return ActivityClassifier(state)\n\n @classmethod\n def _native_name(cls):\n return \"activity_classifier\"\n\n def _get_version(self):\n return self._PYTHON_ACTIVITY_CLASSIFIER_VERSION\n\n def export_coreml(self, filename):\n \"\"\"\n Export the model in Core ML format.\n\n Parameters\n ----------\n filename: str\n A valid filename where the model can be saved.\n\n Examples\n --------\n >>> model.export_coreml(\"MyModel.mlmodel\")\n \"\"\"\n import coremltools as _cmt\n import mxnet as _mx\n from ._mx_model_architecture import _net_params\n\n prob_name = self.target + 'Probability'\n label_name = self.target\n\n input_features = [\n ('features', _cmt.models.datatypes.Array(*(1, self.prediction_window, self.num_features)))\n ]\n output_features = [\n (prob_name, _cmt.models.datatypes.Array(*(self.num_classes,)))\n ]\n\n model_params = self._pred_model.get_params()\n weights = {k: v.asnumpy() for k, v in model_params[0].items()}\n weights = _mx.rnn.LSTMCell(num_hidden=_net_params['lstm_h']).unpack_weights(weights)\n moving_weights = {k: v.asnumpy() for k, v in model_params[1].items()}\n\n builder = _cmt.models.neural_network.NeuralNetworkBuilder(\n input_features,\n output_features,\n mode='classifier'\n )\n\n # Conv\n # (1,1,W,C) -> (1,C,1,W)\n builder.add_permute(name='permute_layer', dim=(0, 3, 1, 2),\n input_name='features', output_name='conv_in')\n W = _np.expand_dims(weights['conv_weight'], axis=0).transpose((2, 3, 
1, 0))\n builder.add_convolution(name='conv_layer',\n kernel_channels=self.num_features,\n output_channels=_net_params['conv_h'],\n height=1, width=self.prediction_window,\n stride_height=1, stride_width=self.prediction_window,\n border_mode='valid', groups=1,\n W=W, b=weights['conv_bias'], has_bias=True,\n input_name='conv_in', output_name='relu0_in')\n builder.add_activation(name='relu_layer0', non_linearity='RELU',\n input_name='relu0_in', output_name='lstm_in')\n\n # LSTM\n builder.add_optionals([('lstm_h_in', _net_params['lstm_h']),\n ('lstm_c_in', _net_params['lstm_h'])],\n [('lstm_h_out', _net_params['lstm_h']),\n ('lstm_c_out', _net_params['lstm_h'])])\n\n W_x = [weights['lstm_i2h_i_weight'], weights['lstm_i2h_f_weight'],\n weights['lstm_i2h_o_weight'], weights['lstm_i2h_c_weight']]\n W_h = [weights['lstm_h2h_i_weight'], weights['lstm_h2h_f_weight'],\n weights['lstm_h2h_o_weight'], weights['lstm_h2h_c_weight']]\n bias = [weights['lstm_h2h_i_bias'], weights['lstm_h2h_f_bias'],\n weights['lstm_h2h_o_bias'], weights['lstm_h2h_c_bias']]\n\n builder.add_unilstm(name='lstm_layer',\n W_h=W_h, W_x=W_x, b=bias,\n input_size=_net_params['conv_h'],\n hidden_size=_net_params['lstm_h'],\n input_names=['lstm_in', 'lstm_h_in', 'lstm_c_in'],\n output_names=['dense0_in', 'lstm_h_out', 'lstm_c_out'],\n inner_activation='SIGMOID')\n\n # Dense\n builder.add_inner_product(name='dense_layer',\n W=weights['dense0_weight'], b=weights['dense0_bias'],\n input_channels=_net_params['lstm_h'],\n output_channels=_net_params['dense_h'],\n has_bias=True,\n input_name='dense0_in',\n output_name='bn_in')\n\n builder.add_batchnorm(name='bn_layer',\n channels=_net_params['dense_h'],\n gamma=weights['bn_gamma'], beta=weights['bn_beta'],\n mean=moving_weights['bn_moving_mean'],\n variance=moving_weights['bn_moving_var'],\n input_name='bn_in', output_name='relu1_in',\n epsilon=0.001)\n builder.add_activation(name='relu_layer1', non_linearity='RELU',\n input_name='relu1_in', output_name='dense1_in')\n\n # Softmax\n builder.add_inner_product(name='dense_layer1',\n W=weights['dense1_weight'], b=weights['dense1_bias'],\n has_bias=True,\n input_channels=_net_params['dense_h'],\n output_channels=self.num_classes,\n input_name='dense1_in', output_name='softmax_in')\n\n builder.add_softmax(name=prob_name,\n input_name='softmax_in',\n output_name=prob_name)\n\n\n labels = list(map(str, sorted(self._target_id_map.keys())))\n builder.set_class_labels(labels)\n mlmodel = _cmt.models.MLModel(builder.spec)\n model_type = 'activity classifier'\n mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)\n # Add useful information to the mlmodel\n features_str = ', '.join(self.features)\n mlmodel.input_description['features'] = u'Window \\xd7 [%s]' % features_str\n mlmodel.input_description['lstm_h_in'] = 'LSTM hidden state input'\n mlmodel.input_description['lstm_c_in'] = 'LSTM cell state input'\n mlmodel.output_description[prob_name] = 'Activity prediction probabilities'\n mlmodel.output_description['classLabel'] = 'Class label of top prediction'\n mlmodel.output_description['lstm_h_out'] = 'LSTM hidden state output'\n mlmodel.output_description['lstm_c_out'] = 'LSTM cell state output'\n _coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, {\n 'prediction_window': str(self.prediction_window),\n 'session_id': self.session_id,\n 'target': self.target,\n 'features': ','.join(self.features),\n 'max_iterations': str(self.max_iterations),\n }, 
version=ActivityClassifier._PYTHON_ACTIVITY_CLASSIFIER_VERSION)\n spec = mlmodel.get_spec()\n _cmt.models.utils.rename_feature(spec, 'classLabel', label_name)\n _cmt.models.utils.rename_feature(spec, 'lstm_h_in', 'hiddenIn')\n _cmt.models.utils.rename_feature(spec, 'lstm_c_in', 'cellIn')\n _cmt.models.utils.rename_feature(spec, 'lstm_h_out', 'hiddenOut')\n _cmt.models.utils.rename_feature(spec, 'lstm_c_out', 'cellOut')\n _cmt.utils.save_spec(spec, filename)\n\n def predict(self, dataset, output_type='class', output_frequency='per_row'):\n \"\"\"\n Return predictions for ``dataset``, using the trained activity classifier.\n Predictions can be generated as class labels, or as a probability\n vector with probabilities for each class.\n\n The activity classifier generates a single prediction for each\n ``prediction_window`` rows in ``dataset``, per ``session_id``. Thus the\n number of predictions is smaller than the length of ``dataset``. By\n default each prediction is replicated by ``prediction_window`` to return\n a prediction for each row of ``dataset``. Use ``output_frequency`` to\n get the unreplicated predictions.\n\n Parameters\n ----------\n dataset : SFrame\n Dataset of new observations. Must include columns with the same\n names as the features used for model training, but does not require\n a target column. Additional columns are ignored.\n\n output_type : {'class', 'probability_vector'}, optional\n Form of each prediction which is one of:\n\n - 'probability_vector': Prediction probability associated with each\n class as a vector. The probability of the first class (sorted\n alphanumerically by name of the class in the training set) is in\n position 0 of the vector, the second in position 1 and so on.\n - 'class': Class prediction. This returns the class with maximum\n probability.\n\n output_frequency : {'per_row', 'per_window'}, optional\n The frequency of the predictions which is one of:\n\n - 'per_window': Return a single prediction for each\n ``prediction_window`` rows in ``dataset`` per ``session_id``.\n - 'per_row': Convenience option to make sure the number of\n predictions match the number of rows in the dataset. Each\n prediction from the model is repeated ``prediction_window``\n times during that window.\n\n Returns\n -------\n out : SArray | SFrame\n If ``output_frequency`` is 'per_row' return an SArray with predictions\n for each row in ``dataset``.\n If ``output_frequency`` is 'per_window' return an SFrame with\n predictions for ``prediction_window`` rows in ``dataset``.\n\n See Also\n ----------\n create, evaluate, classify\n\n Examples\n --------\n\n .. sourcecode:: python\n\n # One prediction per row\n >>> probability_predictions = model.predict(\n ... data, output_type='probability_vector', output_frequency='per_row')[:4]\n >>> probability_predictions\n\n dtype: array\n Rows: 4\n [array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),\n array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),\n array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),\n array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086])]\n\n # One prediction per window\n >>> class_predictions = model.predict(\n ... 
data, output_type='class', output_frequency='per_window')\n >>> class_predictions\n\n +---------------+------------+-----+\n | prediction_id | session_id |class|\n +---------------+------------+-----+\n | 0 | 3 | 5 |\n | 1 | 3 | 5 |\n | 2 | 3 | 5 |\n | 3 | 3 | 5 |\n | 4 | 3 | 5 |\n | 5 | 3 | 5 |\n | 6 | 3 | 5 |\n | 7 | 3 | 4 |\n | 8 | 3 | 4 |\n | 9 | 3 | 4 |\n | ... | ... | ... |\n +---------------+------------+-----+\n \"\"\"\n _tkutl._raise_error_if_not_sframe(dataset, 'dataset')\n _tkutl._check_categorical_option_type(\n 'output_frequency', output_frequency, ['per_window', 'per_row'])\n _tkutl._check_categorical_option_type(\n 'output_type', output_type, ['probability_vector', 'class'])\n from ._sframe_sequence_iterator import SFrameSequenceIter as _SFrameSequenceIter\n from ._sframe_sequence_iterator import prep_data as _prep_data\n\n from ._sframe_sequence_iterator import _ceil_dev\n from ._mx_model_architecture import _net_params\n from ._mps_model_architecture import _define_model_mps, _predict_mps\n from .._mps_utils import (use_mps as _use_mps,\n ac_weights_mxnet_to_mps as _ac_weights_mxnet_to_mps,)\n\n prediction_window = self.prediction_window\n chunked_dataset, num_sessions = _prep_data(dataset, self.features, self.session_id, prediction_window,\n self._predictions_in_chunk, verbose=False)\n\n # Decide whether to use MPS GPU, MXnet GPU or CPU\n num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=num_sessions)\n use_mps = _use_mps() and num_mxnet_gpus == 0\n\n data_iter = _SFrameSequenceIter(chunked_dataset, len(self.features),\n prediction_window, self._predictions_in_chunk,\n self._recalibrated_batch_size, use_pad=True, mx_output=not use_mps)\n\n\n\n if use_mps:\n arg_params, aux_params = self._pred_model.get_params()\n mps_params = _ac_weights_mxnet_to_mps(arg_params, aux_params, _net_params['lstm_h'])\n mps_pred_model = _define_model_mps(self.batch_size, len(self.features), len(self._target_id_map),\n prediction_window, self._predictions_in_chunk, is_prediction_model=True)\n\n mps_pred_model.load(mps_params)\n\n preds = _predict_mps(mps_pred_model, data_iter)\n else:\n preds = self._pred_model.predict(data_iter).asnumpy()\n\n chunked_data = data_iter.dataset\n\n if output_frequency == 'per_row':\n # Replicate each prediction times prediction_window\n preds = preds.repeat(prediction_window, axis=1)\n\n # Remove predictions for padded rows\n unpadded_len = chunked_data['chunk_len'].to_numpy()\n preds = [p[:unpadded_len[i]] for i, p in enumerate(preds)]\n\n # Reshape from (num_of_chunks, chunk_size, num_of_classes)\n # to (ceil(length / prediction_window), num_of_classes)\n # chunk_size is DIFFERENT between chunks - since padding was removed.\n out = _np.concatenate(preds)\n out = out.reshape((-1, len(self._target_id_map)))\n out = _SArray(out)\n\n if output_type == 'class':\n id_target_map = self._id_target_map\n out = out.apply(lambda c: id_target_map[_np.argmax(c)])\n\n elif output_frequency == 'per_window':\n # Calculate the number of expected predictions and\n # remove predictions for padded data\n unpadded_len = chunked_data['chunk_len'].apply(\n lambda l: _ceil_dev(l, prediction_window)).to_numpy()\n preds = [list(p[:unpadded_len[i]]) for i, p in enumerate(preds)]\n\n out = _SFrame({\n self.session_id: chunked_data['session_id'],\n 'preds': _SArray(preds, dtype=list)\n }).stack('preds', new_column_name='probability_vector')\n\n # Calculate the prediction index per session\n out = out.add_row_number(column_name='prediction_id')\n start_sess_idx = out.groupby(\n 
self.session_id, {'start_idx': _agg.MIN('prediction_id')})\n start_sess_idx = start_sess_idx.unstack(\n [self.session_id, 'start_idx'], new_column_name='idx')['idx'][0]\n\n if output_type == 'class':\n id_target_map = self._id_target_map\n out['probability_vector'] = out['probability_vector'].apply(\n lambda c: id_target_map[_np.argmax(c)])\n out = out.rename({'probability_vector': 'class'})\n\n return out\n\n def evaluate(self, dataset, metric='auto'):\n \"\"\"\n Evaluate the model by making predictions of target values and comparing\n these to actual values.\n\n Parameters\n ----------\n dataset : SFrame\n Dataset of new observations. Must include columns with the same\n names as the session_id, target and features used for model training.\n Additional columns are ignored.\n\n metric : str, optional\n Name of the evaluation metric. Possible values are:\n\n - 'auto' : Returns all available metrics.\n - 'accuracy' : Classification accuracy (micro average).\n - 'auc' : Area under the ROC curve (macro average)\n - 'precision' : Precision score (macro average)\n - 'recall' : Recall score (macro average)\n - 'f1_score' : F1 score (macro average)\n - 'log_loss' : Log loss\n - 'confusion_matrix' : An SFrame with counts of possible\n prediction/true label combinations.\n - 'roc_curve' : An SFrame containing information needed for an\n ROC curve\n\n Returns\n -------\n out : dict\n Dictionary of evaluation results where the key is the name of the\n evaluation metric (e.g. `accuracy`) and the value is the evaluation\n score.\n\n See Also\n ----------\n create, predict\n\n Examples\n ----------\n .. sourcecode:: python\n\n >>> results = model.evaluate(data)\n >>> print results['accuracy']\n \"\"\"\n\n avail_metrics = ['accuracy', 'auc', 'precision', 'recall',\n 'f1_score', 'log_loss', 'confusion_matrix', 'roc_curve']\n _tkutl._check_categorical_option_type(\n 'metric', metric, avail_metrics + ['auto'])\n\n if metric == 'auto':\n metrics = avail_metrics\n else:\n metrics = [metric]\n\n probs = self.predict(dataset, output_type='probability_vector')\n classes = self.predict(dataset, output_type='class')\n\n ret = {}\n if 'accuracy' in metrics:\n ret['accuracy'] = _evaluation.accuracy(dataset[self.target], classes)\n if 'auc' in metrics:\n ret['auc'] = _evaluation.auc(dataset[self.target], probs)\n if 'precision' in metrics:\n ret['precision'] = _evaluation.precision(dataset[self.target], classes)\n if 'recall' in metrics:\n ret['recall'] = _evaluation.recall(dataset[self.target], classes)\n if 'f1_score' in metrics:\n ret['f1_score'] = _evaluation.f1_score(dataset[self.target], classes)\n if 'log_loss' in metrics:\n ret['log_loss'] = _evaluation.log_loss(dataset[self.target], probs)\n if 'confusion_matrix' in metrics:\n ret['confusion_matrix'] = _evaluation.confusion_matrix(dataset[self.target], classes)\n if 'roc_curve' in metrics:\n ret['roc_curve'] = _evaluation.roc_curve(dataset[self.target], probs)\n\n return ret\n\n def classify(self, dataset, output_frequency='per_row'):\n \"\"\"\n Return a classification, for each ``prediction_window`` examples in the\n ``dataset``, using the trained activity classification model. The output\n SFrame contains predictions as both class labels as well as probabilities \n that the predicted value is the associated label.\n\n Parameters\n ----------\n dataset : SFrame\n Dataset of new observations. Must include columns with the same\n names as the features and session id used for model training, but\n does not require a target column. 
Additional columns are ignored.\n\n output_frequency : {'per_row', 'per_window'}, optional\n The frequency of the predictions which is one of:\n\n - 'per_row': Each prediction is returned ``prediction_window`` times.\n - 'per_window': Return a single prediction for each \n ``prediction_window`` rows in ``dataset`` per ``session_id``.\n\n Returns\n -------\n out : SFrame\n An SFrame with model predictions i.e class labels and probabilities.\n\n See Also\n ----------\n create, evaluate, predict\n\n Examples\n ----------\n >>> classes = model.classify(data)\n \"\"\"\n _tkutl._check_categorical_option_type(\n 'output_frequency', output_frequency, ['per_window', 'per_row'])\n id_target_map = self._id_target_map\n preds = self.predict(\n dataset, output_type='probability_vector', output_frequency=output_frequency)\n\n if output_frequency == 'per_row':\n return _SFrame({\n 'class': preds.apply(lambda p: id_target_map[_np.argmax(p)]),\n 'probability': preds.apply(_np.max)\n })\n elif output_frequency == 'per_window':\n preds['class'] = preds['probability_vector'].apply(\n lambda p: id_target_map[_np.argmax(p)])\n preds['probability'] = preds['probability_vector'].apply(_np.max)\n preds = preds.remove_column('probability_vector')\n return preds\n\n def predict_topk(self, dataset, output_type='probability', k=3, output_frequency='per_row'):\n \"\"\"\n Return top-k predictions for the ``dataset``, using the trained model.\n Predictions are returned as an SFrame with three columns: `prediction_id`, \n `class`, and `probability`, or `rank`, depending on the ``output_type``\n parameter.\n\n Parameters\n ----------\n dataset : SFrame\n Dataset of new observations. Must include columns with the same\n names as the features and session id used for model training, but\n does not require a target column. Additional columns are ignored.\n\n output_type : {'probability', 'rank'}, optional\n Choose the return type of the prediction:\n\n - `probability`: Probability associated with each label in the prediction.\n - `rank` : Rank associated with each label in the prediction.\n\n k : int, optional\n Number of classes to return for each input example.\n\n output_frequency : {'per_row', 'per_window'}, optional\n The frequency of the predictions which is one of:\n\n - 'per_row': Each prediction is returned ``prediction_window`` times.\n - 'per_window': Return a single prediction for each \n ``prediction_window`` rows in ``dataset`` per ``session_id``.\n\n Returns\n -------\n out : SFrame\n An SFrame with model predictions.\n\n See Also\n --------\n predict, classify, evaluate\n\n Examples\n --------\n >>> pred = m.predict_topk(validation_data, k=3)\n >>> pred\n +---------------+-------+-------------------+\n | row_id | class | probability |\n +---------------+-------+-------------------+\n | 0 | 4 | 0.995623886585 |\n | 0 | 9 | 0.0038311756216 |\n | 0 | 7 | 0.000301006948575 |\n | 1 | 1 | 0.928708016872 |\n | 1 | 3 | 0.0440889261663 |\n | 1 | 2 | 0.0176190119237 |\n | 2 | 3 | 0.996967732906 |\n | 2 | 2 | 0.00151345680933 |\n | 2 | 7 | 0.000637513934635 |\n | 3 | 1 | 0.998070061207 |\n | ... | ... | ... 
|\n +---------------+-------+-------------------+\n \"\"\"\n _tkutl._check_categorical_option_type('output_type', output_type, ['probability', 'rank'])\n id_target_map = self._id_target_map\n preds = self.predict(\n dataset, output_type='probability_vector', output_frequency=output_frequency)\n\n if output_frequency == 'per_row':\n probs = preds\n elif output_frequency == 'per_window':\n probs = preds['probability_vector']\n\n if output_type == 'rank':\n probs = probs.apply(lambda p: [\n {'class': id_target_map[i],\n 'rank': i}\n for i in reversed(_np.argsort(p)[-k:])]\n )\n elif output_type == 'probability':\n probs = probs.apply(lambda p: [\n {'class': id_target_map[i],\n 'probability': p[i]}\n for i in reversed(_np.argsort(p)[-k:])]\n )\n\n if output_frequency == 'per_row':\n output = _SFrame({'probs': probs})\n output = output.add_row_number(column_name='row_id')\n elif output_frequency == 'per_window':\n output = _SFrame({\n 'probs': probs,\n self.session_id: preds[self.session_id],\n 'prediction_id': preds['prediction_id']\n })\n\n output = output.stack('probs', new_column_name='probs')\n output = output.unpack('probs', column_name_prefix='')\n return output\n\n def __str__(self):\n \"\"\"\n Return a string description of the model to the ``print`` method.\n\n Returns\n -------\n out : string\n A description of the ActivityClassifier.\n \"\"\"\n return self.__repr__()\n\n def __repr__(self):\n \"\"\"\n Print a string description of the model when the model name is entered\n in the terminal.\n \"\"\"\n width = 40\n sections, section_titles = self._get_summary_struct()\n out = _tkutl._toolkit_repr_print(self, sections, section_titles,\n width=width)\n return out\n\n def _get_summary_struct(self):\n \"\"\"\n Returns a structured description of the model, including (where\n relevant) the schema of the training data, description of the training\n data, training statistics, and model hyperparameters.\n\n Returns\n -------\n sections : list (of list of tuples)\n A list of summary sections.\n Each section is a list.\n Each item in a section list is a tuple of the form:\n ('<label>','<field>')\n section_titles: list\n A list of section titles.\n The order matches that of the 'sections' object.\n \"\"\"\n model_fields = [\n ('Number of examples', 'num_examples'),\n ('Number of sessions', 'num_sessions'),\n ('Number of classes', 'num_classes'),\n ('Number of feature columns', 'num_features'),\n ('Prediction window', 'prediction_window'),\n ]\n training_fields = [\n ('Log-likelihood', 'training_log_loss'),\n ('Training time (sec)', 'training_time'),\n ]\n\n section_titles = ['Schema', 'Training summary']\n return([model_fields, training_fields], section_titles)\n"
] | [
[
"pandas.DataFrame.from_records",
"pandas.util.testing.assert_frame_equal",
"pandas.DataFrame"
],
[
"pandas.Series",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.std",
"numpy.mean",
"numpy.array"
],
[
"numpy.concatenate",
"numpy.argsort",
"numpy.expand_dims",
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
anuragreddygv323/raster-vision | [
"14a6495f23bbef0bf7f7c47fb37b856a559b272f",
"db2bc35f21968618a333cee2f5e86f29e7d56483"
] | [
"src/rastervision/semseg/tasks/utils.py",
"src/detection/scripts/aggregate_predictions.py"
] | [
"\"\"\"Utility functions shared across tasks.\"\"\"\nimport numpy as np\nimport matplotlib as mpl\n# For headless environments\nmpl.use('Agg') # NOQA\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\nfrom rastervision.common.utils import plot_img_row\n\n\ndef predict_x(x, model):\n batch_x = np.expand_dims(x, axis=0)\n batch_y = model.predict(batch_x)\n y = np.squeeze(batch_y, axis=0)\n return y\n\n\ndef make_prediction_img(x, target_size, predict):\n \"\"\"Generate a prediction image one window at a time.\n\n Generate a prediction image consisting of a prediction for each pixel. The\n format of that prediction depends on the output of the predict function.\n Passing a very large image as input to a model might\n not be possible due to memory limitations. Instead, we slide a window over\n the image and get the predictions for each window. The individual\n predictions can be combined to create a large prediction image. By\n overlapping the windows, we can discard inaccurate predictions along window\n boundaries.\n\n # Arguments\n x: the full sized image to get a prediction for\n (nb_rows, nb_cols, nb_channels)\n target_size: the window size which needs to be the same as what the\n model expects as input\n predict: a function that takes a window image of size\n target_size and returns the prediction for each pixel\n\n # Returns\n The prediction image\n \"\"\"\n quarter_target_size = target_size // 4\n half_target_size = target_size // 2\n sample_prediction = predict(x[0:target_size, 0:target_size, :])\n nb_channels = sample_prediction.shape[2]\n dtype = sample_prediction.dtype\n\n pad_width = (\n (quarter_target_size, target_size),\n (quarter_target_size, target_size),\n (0, 0))\n\n pad_x = np.pad(x, pad_width, 'edge')\n pad_y = np.zeros(\n (pad_x.shape[0], pad_x.shape[1], nb_channels),\n dtype=dtype)\n\n def update_prediction_center(row_begin, row_end, col_begin, col_end):\n \"\"\"Just update the center half of the window.\"\"\"\n\n x_window = pad_x[row_begin:row_end, col_begin:col_end, :]\n y_window = predict(x_window)\n\n y_window_center = y_window[\n quarter_target_size:target_size - quarter_target_size,\n quarter_target_size:target_size - quarter_target_size,\n :]\n\n pad_y[\n row_begin + quarter_target_size:row_end - quarter_target_size,\n col_begin + quarter_target_size:col_end - quarter_target_size,\n :] = y_window_center\n\n for row_begin in range(0, pad_x.shape[0], half_target_size):\n for col_begin in range(0, pad_x.shape[1], half_target_size):\n row_end = row_begin + target_size\n col_end = col_begin + target_size\n if row_end <= pad_x.shape[0] and col_end <= pad_x.shape[1]:\n update_prediction_center(\n row_begin, row_end, col_begin, col_end)\n\n y = pad_y[quarter_target_size:quarter_target_size+x.shape[0],\n quarter_target_size:quarter_target_size+x.shape[1],\n :]\n return y\n\n\ndef make_legend(label_keys, label_names):\n patches = []\n for label_key, label_name in zip(label_keys, label_names):\n color = tuple(np.array(label_key) / 255.)\n patch = mpatches.Patch(\n facecolor=color, edgecolor='black', linewidth=0.5,\n label=label_name)\n patches.append(patch)\n plt.legend(handles=patches, loc='upper left',\n bbox_to_anchor=(1, 1), fontsize=4)\n\n\ndef plot_prediction(generator, all_x, y, pred,\n file_path, is_debug=False):\n dataset = generator.dataset\n fig = plt.figure()\n\n nb_subplot_cols = 3\n if is_debug:\n nb_subplot_cols += len(generator.active_input_inds)\n\n grid_spec = mpl.gridspec.GridSpec(1, nb_subplot_cols)\n\n all_x = 
generator.calibrate_image(all_x)\n rgb_input_im = all_x[:, :, dataset.rgb_inds]\n imgs = [rgb_input_im]\n titles = ['RGB']\n\n if is_debug:\n ir_im = all_x[:, :, dataset.ir_ind]\n imgs.append(ir_im)\n titles.append('IR')\n\n depth_im = all_x[:, :, dataset.depth_ind]\n imgs.append(depth_im)\n titles.append('Depth')\n\n ndvi_im = all_x[:, :, dataset.ndvi_ind]\n imgs.append(ndvi_im)\n titles.append('NDVI')\n\n imgs.append(y)\n titles.append('Ground Truth')\n\n imgs.append(pred)\n titles.append('Prediction')\n\n plot_img_row(fig, grid_spec, 0, imgs, titles)\n make_legend(dataset.label_keys, dataset.label_names)\n plt.savefig(file_path, bbox_inches='tight', format='png', dpi=300)\n\n plt.close(fig)\n",
"import json\nimport argparse\nfrom os import makedirs\nfrom os.path import join, splitext\n\nimport numpy as np\nfrom scipy.ndimage import imread\nfrom scipy.misc import imsave\nimport matplotlib as mpl\nmpl.use('Agg') # NOQA\nimport matplotlib.pyplot as plt\nfrom cv2 import groupRectangles\n\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\n\nfrom utils import load_tiff\nfrom settings import max_num_classes\n\n\ndef compute_agg_predictions(window_offsets, window_size, im_size, predictions):\n ''' Aggregate window predictions into predictions for original image. '''\n boxes = []\n scores = []\n classes = []\n\n file_names = sorted(predictions.keys())\n for file_name in file_names:\n preds = predictions[file_name]\n x, y = window_offsets[file_name]\n\n for box in preds['boxes']:\n # box is (ymin, xmin, ymax, xmax) in relative coords\n # (eg. 0.5 is middle of axis).\n # x, y are in pixel offsets.\n box = np.array(box) * window_size\n\n box[0] += y # ymin\n box[1] += x # xmin\n box[2] += y # ymax\n box[3] += x # xmax\n\n box[0] /= im_size[1]\n box[1] /= im_size[0]\n box[2] /= im_size[1]\n box[3] /= im_size[0]\n\n box = np.clip(box, 0, 1).tolist()\n boxes.append(box)\n\n scores.extend(preds['scores'])\n classes.extend([int(class_id) for class_id in preds['classes']])\n\n return boxes, scores, classes\n\n\ndef plot_predictions(plot_path, im, category_index, boxes, scores, classes):\n min_val = np.min(im)\n max_val = np.max(im)\n norm_im = 256 * ((im - min_val) / (max_val - min_val))\n norm_im = norm_im.astype(np.uint8)\n\n vis_util.visualize_boxes_and_labels_on_image_array(\n norm_im,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=4)\n\n imsave(plot_path, norm_im)\n\n\ndef box_to_cv2_rect(im_size, box):\n ymin, xmin, ymax, xmax = box\n width = xmax - xmin\n height = ymax - ymin\n\n xmin = int(xmin * im_size[0])\n width = int(width * im_size[0])\n ymin = int(ymin * im_size[1])\n height = int(height * im_size[1])\n\n rect = (xmin, ymin, width, height)\n return rect\n\n\ndef cv2_rect_to_box(im_size, rect):\n x, y, width, height = rect\n\n x /= im_size[0]\n width /= im_size[0]\n y /= im_size[1]\n height /= im_size[1]\n\n box = [y, x, y + height, x + width]\n return box\n\n\n# From https://stackoverflow.com/questions/28723670/intersection-over-union-between-two-detections # noqa\ndef bb_intersection_over_union(boxA, boxB):\n # determine the (x, y)-coordinates of the intersection rectangle\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB = min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n\n # compute the area of intersection rectangle\n interArea = (xB - xA + 1) * (yB - yA + 1)\n\n # compute the area of both the prediction and ground-truth\n # rectangles\n boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)\n boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)\n\n # compute the intersection over union by taking the intersection\n # area and dividing it by the sum of prediction + ground-truth\n # areas - the interesection area\n iou = interArea / float(boxAArea + boxBArea - interArea)\n\n # return the intersection over union value\n return iou\n\n\ndef rect_to_bbox(rect):\n x, y, width, height = rect\n return [x, y, x + width, y + height]\n\n\ndef group_boxes(boxes, scores, im_size):\n '''Group boxes belonging to a single class.'''\n # Convert boxes to opencv rectangles\n rects = []\n for 
box_ind in range(boxes.shape[0]):\n box = boxes[box_ind, :].tolist()\n rect = box_to_cv2_rect(im_size, box)\n rects.append(rect)\n\n # Add last rect again to ensure that there are at least two rectangles\n # which seems to be required by groupRectangles.\n rects.append(rect)\n\n # Group the rects\n group_threshold = 1\n # May need to tune this parameter for other datasets depending on size\n # of detected objects.\n eps = 0.5\n grouped_rects = groupRectangles(rects, group_threshold, eps)[0]\n grouped_boxes = []\n grouped_scores = []\n\n # Find the rects and corresponding scores that best match the grouped_rects\n for grouped_rect in grouped_rects:\n bbox1 = rect_to_bbox(grouped_rect)\n best_iou = 0.0\n best_ind = None\n\n for rect_ind, rect in enumerate(rects[:-1]):\n bbox2 = rect_to_bbox(rect)\n iou = bb_intersection_over_union(bbox1, bbox2)\n if iou > best_iou:\n best_iou = iou\n best_ind = rect_ind\n\n grouped_boxes.append(cv2_rect_to_box(im_size, rects[best_ind]))\n grouped_scores.append(scores[best_ind])\n\n return grouped_boxes, grouped_scores\n\n\ndef group_predictions(boxes, classes, scores, im_size):\n '''For each class, group boxes that are overlapping.'''\n unique_classes = list(set(classes))\n\n boxes = np.array(boxes)\n classes = np.array(classes)\n scores = np.array(scores)\n\n grouped_boxes = []\n grouped_classes = []\n grouped_scores = []\n\n for class_id in unique_classes:\n class_boxes = boxes[classes == class_id]\n class_scores = scores[classes == class_id]\n\n class_grouped_boxes, class_grouped_scores = \\\n group_boxes(class_boxes, class_scores, im_size)\n grouped_boxes.extend(class_grouped_boxes)\n grouped_classes.extend([class_id] * len(class_grouped_boxes))\n grouped_scores.extend(class_grouped_scores)\n\n return grouped_boxes, grouped_classes, grouped_scores\n\n\ndef save_geojson(path, boxes, classes, scores, im_size, category_index,\n image_dataset=None):\n polygons = []\n for box in boxes:\n x, y, width, height = box_to_cv2_rect(im_size, box)\n nw = (x, y)\n ne = (x + width, y)\n se = (x + width, y + height)\n sw = (x, y + height)\n polygon = [nw, ne, se, sw, nw]\n # Transform from pixel coords to spatial coords\n if image_dataset:\n polygon = [image_dataset.ul(point[1], point[0])\n for point in polygon]\n polygons.append(polygon)\n\n crs = None\n if image_dataset:\n # XXX not sure if I'm getting this properly\n crs_name = image_dataset.crs['init']\n crs = {\n 'type': 'name',\n 'properties': {\n 'name': crs_name\n }\n }\n\n features = [{\n 'type': 'Feature',\n 'properties': {\n 'class_id': int(class_id),\n 'class_name': category_index[class_id]['name'],\n 'score': score\n\n },\n 'geometry': {\n 'type': 'Polygon',\n 'coordinates': [polygon]\n }\n }\n for polygon, class_id, score in zip(polygons, classes, scores)\n ]\n\n geojson = {\n 'type': 'FeatureCollection',\n 'crs': crs,\n 'features': features\n }\n\n with open(path, 'w') as json_file:\n json.dump(geojson, json_file, indent=4)\n\n\ndef aggregate_predictions(image_path, window_info_path, predictions_path,\n label_map_path, output_dir):\n print('Aggregating predictions over windows...')\n\n label_map = label_map_util.load_labelmap(label_map_path)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=max_num_classes, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n\n image_dataset = None\n if splitext(image_path)[1] == '.tif':\n im, image_dataset = load_tiff(image_path)\n else:\n im = imread(image_path)\n\n im_size = [im.shape[1], 
im.shape[0]]\n\n with open(window_info_path) as window_info_file:\n window_info = json.load(window_info_file)\n window_offsets = window_info['offsets']\n window_size = window_info['window_size']\n\n with open(predictions_path) as predictions_file:\n predictions = json.load(predictions_file)\n\n makedirs(output_dir, exist_ok=True)\n boxes, scores, classes = compute_agg_predictions(\n window_offsets, window_size, im_size, predictions)\n # Due to the sliding window approach, sometimes there are multiple\n # slightly different detections where there should only be one. So\n # we group them together.\n # boxes, classes, scores = group_predictions(boxes, classes, scores, im_size)\n\n agg_predictions_path = join(output_dir, 'predictions.geojson')\n save_geojson(agg_predictions_path, boxes, classes, scores, im_size,\n category_index, image_dataset=image_dataset)\n\n plot_path = join(output_dir, 'predictions.jpg')\n plot_predictions(plot_path, im, category_index, boxes, scores, classes)\n\n\ndef parse_args():\n description = \"\"\"\n Aggregate predictions from windows into predictions over original\n image. The output is GeoJSON in the CRS of the input image.\n \"\"\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument('--image-path')\n parser.add_argument('--window-info-path')\n parser.add_argument('--predictions-path')\n parser.add_argument('--label-map-path')\n parser.add_argument('--output-dir')\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n print(args)\n\n aggregate_predictions(\n args.image_path, args.window_info_path, args.predictions_path,\n args.label_map_path, args.output_dir)\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.patches.Patch",
"numpy.expand_dims",
"numpy.pad",
"matplotlib.use",
"numpy.squeeze",
"matplotlib.pyplot.savefig",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"scipy.ndimage.imread",
"numpy.min",
"scipy.misc.imsave",
"matplotlib.use",
"numpy.squeeze",
"numpy.clip",
"numpy.max",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.10",
"0.16",
"0.19",
"0.18",
"0.12",
"1.0",
"0.17",
"1.2"
],
"tensorflow": []
}
] |
yuyunli2/faster_rcnn | [
"c8ddaa02fdc8ca36438713f2584d83dbbfae9ed9"
] | [
"vis_tool.py"
] | [
"import time\n\nimport numpy as np\nimport matplotlib\nimport torch as t\nimport visdom\n\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plot\n\n\nVOC_BBOX_LABEL_NAMES = (\n 'fly',\n 'bike',\n 'bird',\n 'boat',\n 'pin',\n 'bus',\n 'c',\n 'cat',\n 'chair',\n 'cow',\n 'table',\n 'dog',\n 'horse',\n 'moto',\n 'p',\n 'plant',\n 'shep',\n 'sofa',\n 'train',\n 'tv',\n)\n\n\ndef vis_image(img, ax=None):\n if(ax is None):\n fig = plot.figure()\n ax = fig.add_subplot(1, 1, 1)\n img = img.transpose((1, 2, 0))\n ax.imshow(img.astype(np.uint8))\n\n return ax\n\n\ndef vis_bbox(img, bbox, label=None, score=None, ax=None):\n label_names = list(VOC_BBOX_LABEL_NAMES) + ['bg']\n\n if label is not None and not len(bbox) == len(label):\n raise ValueError('The length of label must be same as that of bbox')\n if score is not None and not len(bbox) == len(score):\n raise ValueError('The length of score must be same as that of bbox')\n\n ax = vis_image(img, ax=ax)\n\n if(len(bbox) == 0):\n return ax\n\n for i, bb in enumerate(bbox):\n xy = (bb[1], bb[0])\n height = bb[2] - bb[0]\n width = bb[3] - bb[1]\n ax.add_patch(plot.Rectangle(\n xy, width, height, fill=False, edgecolor='red', linewidth=2))\n\n caption = list()\n\n if(label is not None and label_names is not None):\n lb = label[i]\n if(not (-1 <= lb < len(label_names))):\n raise ValueError('No corresponding name is given')\n caption.append(label_names[lb])\n if(score is not None):\n sc = score[i]\n caption.append('{:.2f}'.format(sc))\n\n if(len(caption) > 0):\n ax.text(bb[1], bb[0], ': '.join(caption), style='italic', bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 0})\n \n return ax\n\n\ndef fig2data(fig):\n fig.canvas.draw()\n\n w, h = fig.canvas.get_width_height()\n buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)\n buf.shape = (w, h, 4)\n\n buf = np.roll(buf, 3, axis=2)\n return buf.reshape(h, w, 4)\n\n\ndef fig4vis(fig):\n ax = fig.get_figure()\n img_data = fig2data(ax).astype(np.int32)\n plot.close()\n\n return img_data[:, :, :3].transpose((2, 0, 1)) / 255.\n\n\ndef visdom_bbox(*args, **kwargs):\n fig = vis_bbox(*args, **kwargs)\n data = fig4vis(fig)\n return data\n\n\nclass Visualizer(object):\n def __init__(self, env='default', **kwargs):\n self.vis = visdom.Visdom(env=env, use_incoming_socket=False, **kwargs)\n self._vis_kw = kwargs\n self.index = {}\n self.log_text = ''\n\n def reinit(self, env='default', **kwargs):\n self.vis = visdom.Visdom(env=env, **kwargs)\n return self\n\n def plot_many(self, d):\n for k, v in d.items():\n if v is not None:\n self.plot(k, v)\n\n def img_many(self, d):\n for k, v in d.items():\n self.img(k, v)\n\n def plot(self, name, y, **kwargs):\n x = self.index.get(name, 0)\n self.vis.line(Y=np.array([y]), X=np.array([x]), win=name, opts=dict(title=name), update=None if x == 0 else 'append', **kwargs)\n self.index[name] = x + 1\n\n def img(self, name, img_, **kwargs):\n self.vis.images(t.Tensor(img_).cpu().numpy(), win=name, opts=dict(title=name), **kwargs)\n\n def log(self, info, win='log_text'):\n self.log_text += ('[{time}] {info} <br>'.format(time=time.strftime('%m%d_%H%M%S'), info=info))\n self.vis.text(self.log_text, win)\n\n def __getattr__(self, name):\n return getattr(self.vis, name)\n\n def state_dict(self):\n return {'index': self.index, 'vis_kw': self._vis_kw, 'log_text': self.log_text,'env': self.vis.env}\n\n def load_state_dict(self, d):\n self.vis = visdom.Visdom(env=d.get('env', self.vis.env), **(self.d.get('vis_kw')))\n self.log_text = d.get('log_text', '')\n self.index = 
d.get('index', dict())\n \n return self"
] | [
[
"matplotlib.pyplot.Rectangle",
"torch.Tensor",
"matplotlib.use",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.roll",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
deeptimittal12/python-neo | [
"7409f47b5debd4d2a75bbf0e77ac10562446c97a",
"7409f47b5debd4d2a75bbf0e77ac10562446c97a"
] | [
"neo/io/brainwaredamio.py",
"neo/rawio/spike2rawio.py"
] | [
"'''\nClass for reading from Brainware DAM files\n\nDAM files are binary files for holding raw data. They are broken up into\nsequence of Segments, each containing a single raw trace and parameters.\n\nThe DAM file does NOT contain a sampling rate, nor can it be reliably\ncalculated from any of the parameters. You can calculate it from\nthe \"sweep length\" attribute if it is present, but it isn't always present.\nIt is more reliable to get it from the corresponding SRC file or F32 file if\nyou have one.\n\nThe DAM file also does not divide up data into Blocks, so only a single\nBlock is returned..\n\nBrainware was developed by Dr. Jan Schnupp and is availabe from\nTucker Davis Technologies, Inc.\nhttp://www.tdt.com/downloads.htm\n\nNeither Dr. Jan Schnupp nor Tucker Davis Technologies, Inc. had any part in the\ndevelopment of this code\n\nThe code is implemented with the permission of Dr. Jan Schnupp\n\nAuthor: Todd Jennings\n'''\n\n# import needed core python modules\nimport os\nimport os.path\n\n# numpy and quantities are already required by neo\nimport numpy as np\nimport quantities as pq\n\n# needed core neo modules\nfrom neo.core import (AnalogSignal, Block,\n ChannelIndex, Segment)\n\n# need to subclass BaseIO\nfrom neo.io.baseio import BaseIO\n\n\nclass BrainwareDamIO(BaseIO):\n \"\"\"\n Class for reading Brainware raw data files with the extension '.dam'.\n\n The read_block method returns the first Block of the file. It will\n automatically close the file after reading.\n The read method is the same as read_block.\n\n Note:\n\n The file format does not contain a sampling rate. The sampling rate\n is set to 1 Hz, but this is arbitrary. If you have a corresponding .src\n or .f32 file, you can get the sampling rate from that. It may also be\n possible to infer it from the attributes, such as \"sweep length\", if\n present.\n\n Usage:\n >>> from neo.io.brainwaredamio import BrainwareDamIO\n >>> damfile = BrainwareDamIO(filename='multi_500ms_mulitrep_ch1.dam')\n >>> blk1 = damfile.read()\n >>> blk2 = damfile.read_block()\n >>> print blk1.segments\n >>> print blk1.segments[0].analogsignals\n >>> print blk1.units\n >>> print blk1.units[0].name\n >>> print blk2\n >>> print blk2[0].segments\n \"\"\"\n\n is_readable = True # This class can only read data\n is_writable = False # write is not supported\n\n # This class is able to directly or indirectly handle the following objects\n # You can notice that this greatly simplifies the full Neo object hierarchy\n supported_objects = [Block, ChannelIndex,\n Segment, AnalogSignal]\n\n readable_objects = [Block]\n writeable_objects = []\n\n has_header = False\n is_streameable = False\n\n # This is for GUI stuff: a definition for parameters when reading.\n # This dict should be keyed by object (`Block`). Each entry is a list\n # of tuple. The first entry in each tuple is the parameter name. 
The\n # second entry is a dict with keys 'value' (for default value),\n # and 'label' (for a descriptive name).\n # Note that if the highest-level object requires parameters,\n # common_io_test will be skipped.\n read_params = {Block: []}\n\n # do not support write so no GUI stuff\n write_params = None\n name = 'Brainware DAM File'\n extensions = ['dam']\n\n mode = 'file'\n\n def __init__(self, filename=None):\n '''\n Arguments:\n filename: the filename\n '''\n BaseIO.__init__(self)\n self._path = filename\n self._filename = os.path.basename(filename)\n self._fsrc = None\n\n def read(self, lazy=False, **kargs):\n '''\n Reads raw data file \"fname\" generated with BrainWare\n '''\n assert not lazy, 'Do not support lazy'\n return self.read_block(lazy=lazy)\n\n def read_block(self, lazy=False, **kargs):\n '''\n Reads a block from the raw data file \"fname\" generated\n with BrainWare\n '''\n assert not lazy, 'Do not support lazy'\n\n # there are no keyargs implemented to so far. If someone tries to pass\n # them they are expecting them to do something or making a mistake,\n # neither of which should pass silently\n if kargs:\n raise NotImplementedError('This method does not have any '\n 'arguments implemented yet')\n self._fsrc = None\n\n block = Block(file_origin=self._filename)\n\n # create the objects to store other objects\n chx = ChannelIndex(file_origin=self._filename,\n channel_ids=np.array([1]),\n index=np.array([0]),\n channel_names=np.array(['Chan1'], dtype='S'))\n\n # load objects into their containers\n block.channel_indexes.append(chx)\n\n # open the file\n with open(self._path, 'rb') as fobject:\n # while the file is not done keep reading segments\n while True:\n seg = self._read_segment(fobject)\n # if there are no more Segments, stop\n if not seg:\n break\n\n # store the segment and signals\n seg.analogsignals[0].channel_index = chx\n block.segments.append(seg)\n\n # remove the file object\n self._fsrc = None\n\n block.create_many_to_one_relationship()\n return block\n\n # -------------------------------------------------------------------------\n # -------------------------------------------------------------------------\n # IMPORTANT!!!\n # These are private methods implementing the internal reading mechanism.\n # Due to the way BrainWare DAM files are structured, they CANNOT be used\n # on their own. 
Calling these manually will almost certainly alter your\n # position in the file in an unrecoverable manner, whether they throw\n # an exception or not.\n # -------------------------------------------------------------------------\n # -------------------------------------------------------------------------\n\n def _read_segment(self, fobject):\n '''\n Read a single segment with a single analogsignal\n\n Returns the segment or None if there are no more segments\n '''\n\n try:\n # float64 -- start time of the AnalogSignal\n t_start = np.fromfile(fobject, dtype=np.float64, count=1)[0]\n except IndexError:\n # if there are no more Segments, return\n return False\n\n # int16 -- index of the stimulus parameters\n seg_index = np.fromfile(fobject, dtype=np.int16, count=1)[0].tolist()\n\n # int16 -- number of stimulus parameters\n numelements = np.fromfile(fobject, dtype=np.int16, count=1)[0]\n\n # read the name strings for the stimulus parameters\n paramnames = []\n for _ in range(numelements):\n # unit8 -- the number of characters in the string\n numchars = np.fromfile(fobject, dtype=np.uint8, count=1)[0]\n\n # char * numchars -- a single name string\n name = np.fromfile(fobject, dtype=np.uint8, count=numchars)\n\n # exclude invalid characters\n name = str(name[name >= 32].view('c').tostring())\n\n # add the name to the list of names\n paramnames.append(name)\n\n # float32 * numelements -- the values for the stimulus parameters\n paramvalues = np.fromfile(fobject, dtype=np.float32, count=numelements)\n\n # combine parameter names and the parameters as a dict\n params = dict(zip(paramnames, paramvalues))\n\n # int32 -- the number elements in the AnalogSignal\n numpts = np.fromfile(fobject, dtype=np.int32, count=1)[0]\n\n # int16 * numpts -- the AnalogSignal itself\n signal = np.fromfile(fobject, dtype=np.int16, count=numpts)\n\n sig = AnalogSignal(signal.astype(np.float) * pq.mV,\n t_start=t_start * pq.d,\n file_origin=self._filename,\n sampling_period=1. * pq.s,\n copy=False)\n # Note: setting the sampling_period to 1 s is arbitrary\n\n # load the AnalogSignal and parameters into a new Segment\n seg = Segment(file_origin=self._filename,\n index=seg_index,\n **params)\n seg.analogsignals = [sig]\n\n return seg\n",
"\"\"\"\nClasse for reading data in CED spike2 files (.smr).\n\nThis code is based on:\n - sonpy, written by Antonio Gonzalez <[email protected]>\n Disponible here ::\n http://www.neuro.ki.se/broberger/\n\nand sonpy come from :\n - SON Library 2.0 for MATLAB, written by Malcolm Lidierth at\n King's College London.\n See http://www.kcl.ac.uk/depsta/biomedical/cfnr/lidierth.html\n\nThis IO support old (<v6) and new files (>v7) of spike2\n\n\nAuthor: Samuel Garcia\n\n\"\"\"\n# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3\n\nfrom .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,\n _event_channel_dtype)\n\nimport numpy as np\nfrom collections import OrderedDict\n\n\nclass Spike2RawIO(BaseRawIO):\n \"\"\"\n\n \"\"\"\n extensions = ['smr']\n rawmode = 'one-file'\n\n def __init__(self, filename='', take_ideal_sampling_rate=False, ced_units=True,\n try_signal_grouping=True):\n BaseRawIO.__init__(self)\n self.filename = filename\n\n self.take_ideal_sampling_rate = take_ideal_sampling_rate\n self.ced_units = ced_units\n self.try_signal_grouping = try_signal_grouping\n\n def _parse_header(self):\n\n # get header info and channel_info\n with open(self.filename, 'rb') as fid:\n self._global_info = read_as_dict(fid, headerDescription)\n info = self._global_info\n if info['system_id'] < 6:\n info['dtime_base'] = 1e-6\n info['datetime_detail'] = 0\n info['datetime_year'] = 0\n\n self._time_factor = info['us_per_time'] * info['dtime_base']\n\n self._channel_infos = []\n for chan_id in range(info['channels']):\n fid.seek(512 + 140 * chan_id)\n chan_info = read_as_dict(fid, channelHeaderDesciption1)\n\n if chan_info['kind'] in [1, 6]:\n dt = [('scale', 'f4'), ('offset', 'f4'), ('unit', 'S6'), ]\n chan_info.update(read_as_dict(fid, dt))\n\n elif chan_info['kind'] in [7, 9]:\n dt = [('min', 'f4'), ('max', 'f4'), ('unit', 'S6'), ]\n chan_info.update(read_as_dict(fid, dt))\n\n elif chan_info['kind'] in [4]:\n dt = [('init_low', 'u1'), ('next_low', 'u1'), ]\n chan_info.update(read_as_dict(fid, dt))\n\n if chan_info['kind'] in [1, 6, 7, 9]:\n if info['system_id'] < 6:\n chan_info.update(read_as_dict(fid, [('divide', 'i2')]))\n else:\n chan_info.update(read_as_dict(fid, [('interleave', 'i2')]))\n\n chan_info['type'] = dict_kind[chan_info['kind']]\n\n if chan_info['blocks'] == 0:\n chan_info['t_start'] = 0. 
# this means empty signals\n else:\n fid.seek(chan_info['firstblock'])\n block_info = read_as_dict(fid, blockHeaderDesciption)\n chan_info['t_start'] = float(block_info['start_time']) * \\\n float(info['us_per_time']) * float(info['dtime_base'])\n\n self._channel_infos.append(chan_info)\n\n # get data blocks index for all channel\n # run through all data block of of channel to prepare chan to block maps\n self._memmap = np.memmap(self.filename, dtype='u1', offset=0, mode='r')\n self._all_data_blocks = {}\n self._by_seg_data_blocks = {}\n for chan_id, chan_info in enumerate(self._channel_infos):\n data_blocks = []\n ind = chan_info['firstblock']\n for b in range(chan_info['blocks']):\n block_info = self._memmap[ind:ind + 20].view(blockHeaderDesciption)[0]\n data_blocks.append((ind, block_info['items'], 0,\n block_info['start_time'], block_info['end_time']))\n ind = block_info['succ_block']\n\n data_blocks = np.array(data_blocks, dtype=[(\n 'pos', 'int32'), ('size', 'int32'), ('cumsum', 'int32'),\n ('start_time', 'int32'), ('end_time', 'int32')])\n data_blocks['pos'] += 20 # 20 is ths header size\n\n self._all_data_blocks[chan_id] = data_blocks\n self._by_seg_data_blocks[chan_id] = []\n\n # For all signal channel detect gaps between data block (pause in rec) so new Segment.\n # then check that all channel have the same gaps.\n # this part is tricky because we need to check that all channel have same pause.\n all_gaps_block_ind = {}\n for chan_id, chan_info in enumerate(self._channel_infos):\n if chan_info['kind'] in [1, 9]:\n data_blocks = self._all_data_blocks[chan_id]\n sig_size = np.sum(self._all_data_blocks[chan_id]['size'])\n if sig_size > 0:\n interval = get_sample_interval(info, chan_info) / self._time_factor\n # detect gaps\n inter_block_sizes = data_blocks['start_time'][1:] - \\\n data_blocks['end_time'][:-1]\n gaps_block_ind, = np.nonzero(inter_block_sizes > interval)\n all_gaps_block_ind[chan_id] = gaps_block_ind\n\n # find t_start/t_stop for each seg based on gaps indexe\n self._sig_t_starts = {}\n self._sig_t_stops = {}\n if len(all_gaps_block_ind) == 0:\n # this means no signal channels\n nb_segment = 1\n # loop over event/spike channel to get the min/max time\n t_start, t_stop = None, None\n for chan_id, chan_info in enumerate(self._channel_infos):\n data_blocks = self._all_data_blocks[chan_id]\n if data_blocks.size > 0:\n # if t_start is None or data_blocks[0]['start_time']<t_start:\n # t_start = data_blocks[0]['start_time']\n if t_stop is None or data_blocks[-1]['end_time'] > t_stop:\n t_stop = data_blocks[-1]['end_time']\n # self._seg_t_starts = [t_start]\n self._seg_t_starts = [0]\n self._seg_t_stops = [t_stop]\n else:\n all_nb_seg = np.array([v.size + 1 for v in all_gaps_block_ind.values()])\n assert np.all(all_nb_seg[0] == all_nb_seg), \\\n 'Signal channel have differents pause so diffrents nb_segment'\n nb_segment = int(all_nb_seg[0])\n\n for chan_id, gaps_block_ind in all_gaps_block_ind.items():\n data_blocks = self._all_data_blocks[chan_id]\n self._sig_t_starts[chan_id] = []\n self._sig_t_stops[chan_id] = []\n\n for seg_ind in range(nb_segment):\n if seg_ind == 0:\n fisrt_bl = 0\n else:\n fisrt_bl = gaps_block_ind[seg_ind - 1] + 1\n self._sig_t_starts[chan_id].append(data_blocks[fisrt_bl]['start_time'])\n\n if seg_ind < nb_segment - 1:\n last_bl = gaps_block_ind[seg_ind]\n else:\n last_bl = data_blocks.size - 1\n\n self._sig_t_stops[chan_id].append(data_blocks[last_bl]['end_time'])\n\n in_seg_data_block = data_blocks[fisrt_bl:last_bl + 1]\n 
in_seg_data_block['cumsum'][1:] = np.cumsum(in_seg_data_block['size'][:-1])\n self._by_seg_data_blocks[chan_id].append(in_seg_data_block)\n\n self._seg_t_starts = []\n self._seg_t_stops = []\n for seg_ind in range(nb_segment):\n # there is a small delay between all channel so take the max/min for t_start/t_stop\n t_start = min(\n self._sig_t_starts[chan_id][seg_ind] for chan_id in self._sig_t_starts)\n t_stop = max(self._sig_t_stops[chan_id][seg_ind] for chan_id in self._sig_t_stops)\n self._seg_t_starts.append(t_start)\n self._seg_t_stops.append(t_stop)\n\n # create typed channels\n sig_channels = []\n unit_channels = []\n event_channels = []\n\n self.internal_unit_ids = {}\n for chan_id, chan_info in enumerate(self._channel_infos):\n if chan_info['kind'] in [1, 6, 7, 9]:\n if self.take_ideal_sampling_rate:\n sampling_rate = info['ideal_rate']\n else:\n sample_interval = get_sample_interval(info, chan_info)\n sampling_rate = (1. / sample_interval)\n\n name = chan_info['title']\n\n if chan_info['kind'] in [1, 9]:\n # AnalogSignal\n if chan_id not in self._sig_t_starts:\n continue\n units = chan_info['unit']\n if chan_info['kind'] == 1: # int16\n gain = chan_info['scale'] / 6553.6\n offset = chan_info['offset']\n sig_dtype = 'int16'\n elif chan_info['kind'] == 9: # float32\n gain = 1.\n offset = 0.\n sig_dtype = 'float32'\n group_id = 0\n sig_channels.append((name, chan_id, sampling_rate, sig_dtype,\n units, gain, offset, group_id))\n\n elif chan_info['kind'] in [2, 3, 4, 5, 8]:\n # Event\n event_channels.append((name, chan_id, 'event'))\n\n elif chan_info['kind'] in [6, 7]: # SpikeTrain with waveforms\n wf_units = chan_info['unit']\n if chan_info['kind'] == 6:\n wf_gain = chan_info['scale'] / 6553.6\n wf_offset = chan_info['offset']\n wf_left_sweep = chan_info['n_extra'] // 4\n elif chan_info['kind'] == 7:\n wf_gain = 1.\n wf_offset = 0.\n wf_left_sweep = chan_info['n_extra'] // 8\n wf_sampling_rate = sampling_rate\n if self.ced_units:\n # this is a hudge pain because need\n # to jump over all blocks\n data_blocks = self._all_data_blocks[chan_id]\n dt = get_channel_dtype(chan_info)\n unit_ids = set()\n for bl in range(data_blocks.size):\n ind0 = data_blocks[bl]['pos']\n ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0\n raw_data = self._memmap[ind0:ind1].view(dt)\n marker = raw_data['marker'] & 255\n unit_ids.update(np.unique(marker))\n unit_ids = sorted(list(unit_ids))\n else:\n # All spike from one channel are group in one SpikeTrain\n unit_ids = ['all']\n for unit_id in unit_ids:\n unit_index = len(unit_channels)\n self.internal_unit_ids[unit_index] = (chan_id, unit_id)\n _id = \"ch{}#{}\".format(chan_id, unit_id)\n unit_channels.append((name, _id, wf_units, wf_gain, wf_offset,\n wf_left_sweep, wf_sampling_rate))\n\n sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)\n unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)\n event_channels = np.array(event_channels, dtype=_event_channel_dtype)\n\n if len(sig_channels) > 0:\n if self.try_signal_grouping:\n # try to group signals channel if same sampling_rate/dtype/...\n # it can raise error for some files (when they do not have signal length)\n common_keys = ['sampling_rate', 'dtype', 'units', 'gain', 'offset']\n characteristics = sig_channels[common_keys]\n unique_characteristics = np.unique(characteristics)\n self._sig_dtypes = {}\n for group_id, charact in enumerate(unique_characteristics):\n chan_grp_indexes, = np.nonzero(characteristics == charact)\n sig_channels['group_id'][chan_grp_indexes] = 
group_id\n\n # check same size for channel in groups\n for seg_index in range(nb_segment):\n sig_sizes = []\n for ind in chan_grp_indexes:\n chan_id = sig_channels[ind]['id']\n sig_size = np.sum(self._by_seg_data_blocks[chan_id][seg_index]['size'])\n sig_sizes.append(sig_size)\n sig_sizes = np.array(sig_sizes)\n assert np.all(sig_sizes == sig_sizes[0]),\\\n 'Signal channel in groups do not have same size'\\\n ', use try_signal_grouping=False'\n self._sig_dtypes[group_id] = np.dtype(charact['dtype'])\n else:\n # if try_signal_grouping fail the user can try to split each channel in\n # separate group\n sig_channels['group_id'] = np.arange(sig_channels.size)\n self._sig_dtypes = {s['group_id']: np.dtype(s['dtype']) for s in sig_channels}\n\n # fille into header dict\n self.header = {}\n self.header['nb_block'] = 1\n self.header['nb_segment'] = [nb_segment]\n self.header['signal_channels'] = sig_channels\n self.header['unit_channels'] = unit_channels\n self.header['event_channels'] = event_channels\n\n # Annotations\n self._generate_minimal_annotations()\n bl_ann = self.raw_annotations['blocks'][0]\n bl_ann['system_id'] = info['system_id']\n seg_ann = bl_ann['segments'][0]\n seg_ann['system_id'] = info['system_id']\n\n for c, sig_channel in enumerate(sig_channels):\n chan_id = sig_channel['id']\n anasig_an = seg_ann['signals'][c]\n anasig_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']\n anasig_an['comment'] = self._channel_infos[chan_id]['comment']\n\n for c, unit_channel in enumerate(unit_channels):\n chan_id, unit_id = self.internal_unit_ids[c]\n unit_an = seg_ann['units'][c]\n unit_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']\n unit_an['comment'] = self._channel_infos[chan_id]['comment']\n\n for c, event_channel in enumerate(event_channels):\n chan_id = int(event_channel['id'])\n ev_an = seg_ann['events'][c]\n ev_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']\n ev_an['comment'] = self._channel_infos[chan_id]['comment']\n\n def _source_name(self):\n return self.filename\n\n def _segment_t_start(self, block_index, seg_index):\n return self._seg_t_starts[seg_index] * self._time_factor\n\n def _segment_t_stop(self, block_index, seg_index):\n return self._seg_t_stops[seg_index] * self._time_factor\n\n def _check_channel_indexes(self, channel_indexes):\n if channel_indexes is None:\n channel_indexes = slice(None)\n channel_indexes = np.arange(self.header['signal_channels'].size)[channel_indexes]\n return channel_indexes\n\n def _get_signal_size(self, block_index, seg_index, channel_indexes):\n channel_indexes = self._check_channel_indexes(channel_indexes)\n chan_id = self.header['signal_channels'][channel_indexes[0]]['id']\n sig_size = np.sum(self._by_seg_data_blocks[chan_id][seg_index]['size'])\n return sig_size\n\n def _get_signal_t_start(self, block_index, seg_index, channel_indexes):\n channel_indexes = self._check_channel_indexes(channel_indexes)\n chan_id = self.header['signal_channels'][channel_indexes[0]]['id']\n return self._sig_t_starts[chan_id][seg_index] * self._time_factor\n\n def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):\n if i_start is None:\n i_start = 0\n if i_stop is None:\n i_stop = self._get_signal_size(block_index, seg_index, channel_indexes)\n\n channel_indexes = self._check_channel_indexes(channel_indexes)\n chan_index = channel_indexes[0]\n chan_id = self.header['signal_channels'][chan_index]['id']\n group_id = 
self.header['signal_channels'][channel_indexes[0]]['group_id']\n dt = self._sig_dtypes[group_id]\n\n raw_signals = np.zeros((i_stop - i_start, len(channel_indexes)), dtype=dt)\n for c, channel_index in enumerate(channel_indexes):\n # NOTE: this actual way is slow because we run throught\n # the file for each channel. The loop should be reversed.\n # But there is no garanty that channels shared the same data block\n # indexes. So this make the job too difficult.\n chan_header = self.header['signal_channels'][channel_index]\n chan_id = chan_header['id']\n data_blocks = self._by_seg_data_blocks[chan_id][seg_index]\n\n # loop over data blocks and get chunks\n bl0 = np.searchsorted(data_blocks['cumsum'], i_start, side='left')\n bl1 = np.searchsorted(data_blocks['cumsum'], i_stop, side='left')\n ind = 0\n for bl in range(bl0, bl1):\n ind0 = data_blocks[bl]['pos']\n ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0\n data = self._memmap[ind0:ind1].view(dt)\n if bl == bl1 - 1:\n # right border\n # be carfull that bl could be both bl0 and bl1!!\n border = data.size - (i_stop - data_blocks[bl]['cumsum'])\n if border > 0:\n data = data[:-border]\n if bl == bl0:\n # left border\n border = i_start - data_blocks[bl]['cumsum']\n data = data[border:]\n raw_signals[ind:data.size + ind, c] = data\n ind += data.size\n return raw_signals\n\n def _count_in_time_slice(self, seg_index, chan_id, lim0, lim1, marker_filter=None):\n # count event or spike in time slice\n data_blocks = self._all_data_blocks[chan_id]\n chan_info = self._channel_infos[chan_id]\n dt = get_channel_dtype(chan_info)\n nb = 0\n for bl in range(data_blocks.size):\n ind0 = data_blocks[bl]['pos']\n ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0\n raw_data = self._memmap[ind0:ind1].view(dt)\n ts = raw_data['tick']\n keep = (ts >= lim0) & (ts <= lim1)\n if marker_filter is not None:\n keep2 = (raw_data['marker'] & 255) == marker_filter\n keep = keep & keep2\n nb += np.sum(keep)\n if ts[-1] > lim1:\n break\n return nb\n\n def _get_internal_timestamp_(self, seg_index, chan_id,\n t_start, t_stop, other_field=None, marker_filter=None):\n chan_info = self._channel_infos[chan_id]\n # data_blocks = self._by_seg_data_blocks[chan_id][seg_index]\n data_blocks = self._all_data_blocks[chan_id]\n dt = get_channel_dtype(chan_info)\n\n if t_start is None:\n # lim0 = 0\n lim0 = self._seg_t_starts[seg_index]\n else:\n lim0 = int(t_start / self._time_factor)\n\n if t_stop is None:\n # lim1 = 2**32\n lim1 = self._seg_t_stops[seg_index]\n else:\n lim1 = int(t_stop / self._time_factor)\n\n timestamps = []\n othervalues = []\n for bl in range(data_blocks.size):\n ind0 = data_blocks[bl]['pos']\n ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0\n raw_data = self._memmap[ind0:ind1].view(dt)\n ts = raw_data['tick']\n keep = (ts >= lim0) & (ts <= lim1)\n if marker_filter is not None:\n keep2 = (raw_data['marker'] & 255) == marker_filter\n keep = keep & keep2\n\n timestamps.append(ts[keep])\n if other_field is not None:\n othervalues.append(raw_data[other_field][keep])\n if ts[-1] > lim1:\n break\n\n if len(timestamps) > 0:\n timestamps = np.concatenate(timestamps)\n else:\n timestamps = np.zeros(0, dtype='int16')\n\n if other_field is None:\n return timestamps\n else:\n if len(timestamps) > 0:\n othervalues = np.concatenate(othervalues)\n else:\n othervalues = np.zeros(0, dtype=dt.fields[other_field][0])\n return timestamps, othervalues\n\n def _spike_count(self, block_index, seg_index, unit_index):\n chan_id, unit_id = self.internal_unit_ids[unit_index]\n if 
self.ced_units:\n marker_filter = unit_id\n else:\n marker_filter = None\n lim0 = self._seg_t_starts[seg_index]\n lim1 = self._seg_t_stops[seg_index]\n return self._count_in_time_slice(seg_index, chan_id,\n lim0, lim1, marker_filter=marker_filter)\n\n def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):\n unit_header = self.header['unit_channels'][unit_index]\n chan_id, unit_id = self.internal_unit_ids[unit_index]\n\n if self.ced_units:\n marker_filter = unit_id\n else:\n marker_filter = None\n\n spike_timestamps = self._get_internal_timestamp_(seg_index,\n chan_id, t_start, t_stop,\n marker_filter=marker_filter)\n\n return spike_timestamps\n\n def _rescale_spike_timestamp(self, spike_timestamps, dtype):\n spike_times = spike_timestamps.astype(dtype)\n spike_times *= self._time_factor\n return spike_times\n\n def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):\n unit_header = self.header['unit_channels'][unit_index]\n chan_id, unit_id = self.internal_unit_ids[unit_index]\n\n if self.ced_units:\n marker_filter = unit_id\n else:\n marker_filter = None\n\n timestamps, waveforms = self._get_internal_timestamp_(seg_index, chan_id,\n t_start, t_stop,\n other_field='waveform',\n marker_filter=marker_filter)\n\n waveforms = waveforms.reshape(timestamps.size, 1, -1)\n\n return waveforms\n\n def _event_count(self, block_index, seg_index, event_channel_index):\n event_header = self.header['event_channels'][event_channel_index]\n chan_id = int(event_header['id']) # because set to string in header\n lim0 = self._seg_t_starts[seg_index]\n lim1 = self._seg_t_stops[seg_index]\n return self._count_in_time_slice(seg_index, chan_id, lim0, lim1, marker_filter=None)\n\n def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):\n event_header = self.header['event_channels'][event_channel_index]\n chan_id = int(event_header['id']) # because set to string in header\n chan_info = self._channel_infos[chan_id]\n\n if chan_info['kind'] == 5:\n timestamps, labels = self._get_internal_timestamp_(seg_index,\n chan_id, t_start, t_stop,\n other_field='marker')\n elif chan_info['kind'] == 8:\n timestamps, labels = self._get_internal_timestamp_(seg_index,\n chan_id, t_start, t_stop,\n other_field='label')\n else:\n timestamps = self._get_internal_timestamp_(seg_index,\n chan_id, t_start, t_stop, other_field=None)\n labels = np.zeros(timestamps.size, dtype='U')\n\n labels = labels.astype('U')\n durations = None\n\n return timestamps, durations, labels\n\n def _rescale_event_timestamp(self, event_timestamps, dtype):\n event_times = event_timestamps.astype(dtype)\n event_times *= self._time_factor\n return event_times\n\n\ndef read_as_dict(fid, dtype):\n \"\"\"\n Given a file descriptor (seek at the good place externally)\n and a numpy.dtype of the binary struct return a dict.\n Make conversion for strings.\n \"\"\"\n dt = np.dtype(dtype)\n h = np.frombuffer(fid.read(dt.itemsize), dt)[0]\n info = OrderedDict()\n for k in dt.names:\n v = h[k]\n\n if dt[k].kind == 'S':\n v = v.decode('iso-8859-1')\n if len(v) > 0:\n l = ord(v[0])\n v = v[1:l + 1]\n\n info[k] = v\n return info\n\n\ndef get_channel_dtype(chan_info):\n \"\"\"\n Get dtype by kind.\n \"\"\"\n if chan_info['kind'] == 1: # Raw signal\n dt = 'int16'\n elif chan_info['kind'] in [2, 3, 4]: # Event data\n dt = [('tick', 'i4')]\n elif chan_info['kind'] in [5]: # Marker data\n dt = [('tick', 'i4'), ('marker', 'i4')]\n elif chan_info['kind'] in [6]: # AdcMark data 
(waveform)\n dt = [('tick', 'i4'), ('marker', 'i4'),\n # ('adc', 'S%d' % chan_info['n_extra'])]\n ('waveform', 'int16', chan_info['n_extra'] // 2)]\n elif chan_info['kind'] in [7]: # RealMark data (waveform)\n dt = [('tick', 'i4'), ('marker', 'i4'),\n # ('real', 'S%d' % chan_info['n_extra'])]\n ('waveform', 'float32', chan_info['n_extra'] // 4)]\n elif chan_info['kind'] in [8]: # TextMark data\n dt = [('tick', 'i4'), ('marker', 'i4'),\n ('label', 'S%d' % chan_info['n_extra'])]\n elif chan_info['kind'] == 9: # Float signal\n dt = 'float32'\n dt = np.dtype(dt)\n return dt\n\n\ndef get_sample_interval(info, chan_info):\n \"\"\"\n Get sample interval for one channel\n \"\"\"\n if info['system_id'] in [1, 2, 3, 4, 5]: # Before version 5\n sample_interval = (chan_info['divide'] * info['us_per_time'] *\n info['time_per_adc']) * 1e-6\n else:\n sample_interval = (chan_info['l_chan_dvd'] *\n info['us_per_time'] * info['dtime_base'])\n return sample_interval\n\n\n# headers structures :\nheaderDescription = [\n ('system_id', 'i2'),\n ('copyright', 'S10'),\n ('creator', 'S8'),\n ('us_per_time', 'i2'),\n ('time_per_adc', 'i2'),\n ('filestate', 'i2'),\n ('first_data', 'i4'), # i8\n ('channels', 'i2'),\n ('chan_size', 'i2'),\n ('extra_data', 'i2'),\n ('buffersize', 'i2'),\n ('os_format', 'i2'),\n ('max_ftime', 'i4'), # i8\n ('dtime_base', 'f8'),\n ('datetime_detail', 'u1'),\n ('datetime_year', 'i2'),\n ('pad', 'S52'),\n ('comment1', 'S80'),\n ('comment2', 'S80'),\n ('comment3', 'S80'),\n ('comment4', 'S80'),\n ('comment5', 'S80'),\n]\n\nchannelHeaderDesciption1 = [\n ('del_size', 'i2'),\n ('next_del_block', 'i4'), # i8\n ('firstblock', 'i4'), # i8\n ('lastblock', 'i4'), # i8\n ('blocks', 'i2'),\n ('n_extra', 'i2'),\n ('pre_trig', 'i2'),\n ('free0', 'i2'),\n ('py_sz', 'i2'),\n ('max_data', 'i2'),\n ('comment', 'S72'),\n ('max_chan_time', 'i4'), # i8\n ('l_chan_dvd', 'i4'), # i8\n ('phy_chan', 'i2'),\n ('title', 'S10'),\n ('ideal_rate', 'f4'),\n ('kind', 'u1'),\n ('unused1', 'i1'),\n]\n\nblockHeaderDesciption = [\n ('pred_block', 'i4'), # i8\n ('succ_block', 'i4'), # i8\n ('start_time', 'i4'), # i8\n ('end_time', 'i4'), # i8\n ('channel_num', 'i2'),\n ('items', 'i2'),\n]\n\ndict_kind = {\n 0: 'empty',\n 1: 'Adc',\n 2: 'EventFall',\n 3: 'EventRise',\n 4: 'EventBoth',\n 5: 'Marker',\n 6: 'AdcMark',\n 7: 'RealMark',\n 8: 'TextMark',\n 9: 'RealWave',\n}\n"
] | [
[
"numpy.array",
"numpy.fromfile"
],
[
"numpy.nonzero",
"numpy.unique",
"numpy.arange",
"numpy.memmap",
"numpy.cumsum",
"numpy.dtype",
"numpy.all",
"numpy.concatenate",
"numpy.searchsorted",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LeonardoSirino/FuzzyTableExtractor | [
"114f5b2b1c65bfcaa84cb75c876b68ce1974c821"
] | [
"fuzzy_table_extractor/extractor.py"
] | [
"from collections import deque\nfrom dataclasses import dataclass\nfrom enum import Enum, auto\nfrom typing import Callable, Iterable, List\n\nimport numpy as np\nimport pandas as pd\n\nfrom .handlers.base_handler import BaseHandler, BaseNode, TreeFileHandler\nfrom .util import match_regex_list, str_comparison\n\n\n@dataclass\nclass TableMatch:\n search_term: str\n original_term: str\n score: str\n\n\nclass FieldOrientation(Enum):\n ROW = auto()\n COLUMN = auto()\n\n\nclass Extractor:\n \"\"\"The Extractor class has the functions to extract data from tables in document.\n It receives a document handler on initialization, this handler follows an interface, so the extraction is agnostic of the document type.\n \"\"\"\n\n def __init__(self, doc_handler: BaseHandler) -> None:\n \"\"\"Initialize the extractor with a document handler\n\n Args:\n doc_handler (BaseHandler): document handler to extract data from\n \"\"\"\n self.doc_handler = doc_handler\n\n def extract_closest_table(\n self,\n search_headers: List[str],\n validation_funtion: Callable[[List[str]], bool] = lambda x: True,\n minimum_proximity_ratio: float = 0,\n ) -> pd.DataFrame:\n \"\"\"Extract the table in document that has the closest header to search_headers\n\n Args:\n search_headers (List[str]): list of itens to search in header.\n validation_funtion (Callable[[List[str]], bool], optional): function to validate if the table is valid. This function receives the table header as argument and must return True if the table is valid. Defaults to lambda x: True.\n minimum_proximity_ratio (float, optional): minimum proximity ratio to consider there is a match in header. Value must be between 0 and 100. Defaults to 0.\n\n Returns:\n pd.DataFrame: best match\n \"\"\"\n if minimum_proximity_ratio < 0 or minimum_proximity_ratio > 100:\n raise ValueError(\"minimum_proximity_ratio must be between 0 and 100\")\n\n tables = self.doc_handler.tables\n ratios = []\n for df in tables:\n if validation_funtion(df.columns.to_list()):\n ratio = self.headers_proximity_ratio(\n document_headers=df.columns.to_list(), search_headers=search_headers\n )\n\n ratios.append(ratio)\n else:\n ratios.append(0)\n\n if len(ratios) == 0:\n return pd.DataFrame()\n\n best_ratio = np.max(ratios)\n if best_ratio < minimum_proximity_ratio:\n return pd.DataFrame()\n\n best_match = tables[np.argmax(ratios)]\n\n df = self.get_columns_fuzzy(best_match, search_headers)\n\n return df\n\n def extract_single_field(\n self,\n field: str,\n orientation: FieldOrientation,\n regex: List[str] = [\"\"],\n title_regex: List[str] = [\"\"],\n return_multiple: bool = False,\n ) -> str:\n \"\"\"Extract single field of a word document based on a input string.\n The data will be extracted from tables in document\n\n Args:\n field (str): search field\n orientation (FieldOrientation): orientation to search the content of field\n regex (List[str], optional): list of regex to apply to content. To be a valid content there must be at least one match of regex in list. Defaults to [''].\n title_regex (List[str], optional): list of regex to apply to title. To be a valid title there must be at least one match of regex in list. Defaults to [''].\n return_multiple (bool, optional): if True, will return all matches that has the same proximity ratio. 
Defaults to False.\n\n Returns:\n str: best match\n \"\"\"\n df = self.doc_handler.dictionary\n\n df = df[df[\"orientation\"] == orientation.name.lower()]\n df = df[df[\"content\"].apply(lambda x: match_regex_list(x, regex))]\n df = df[df[\"title\"].apply(lambda x: match_regex_list(x, title_regex))]\n\n if df.empty:\n return \"\"\n\n df[\"ratio\"] = df[\"title\"].apply(lambda x: str_comparison(x, field))\n df.sort_values(by=\"ratio\", inplace=True, ascending=False)\n\n try:\n if return_multiple:\n max_ratio = df[\"ratio\"].max()\n values = df[df[\"ratio\"] == max_ratio][\"content\"].to_list()\n best_match = \", \".join(values)\n else:\n best_match = df[\"content\"].values[0]\n except IndexError:\n best_match = \"\"\n\n return best_match\n\n @staticmethod\n def headers_proximity_ratio(\n document_headers: List[str], search_headers: List[str]\n ) -> int:\n \"\"\"Calculates a proximity ratio of two headers\n\n Args:\n document_headers (List[str]): headers in document\n search_headers (List[str]): search headers\n\n Returns:\n int: proximity ratio\n \"\"\"\n matches = Extractor.headers_association(document_headers, search_headers)\n\n if len(matches) == 0:\n return 0\n\n scores = [x.score for x in matches]\n\n return min(scores)\n\n @staticmethod\n def headers_association(\n document_headers: List[str], search_headers: List[str]\n ) -> List[TableMatch]:\n # TODO I think this can be improved\n \"\"\"Determine the best association of two headers\n\n Args:\n document_headers (List[str]): headers in document\n search_headers (List[str]): search headers\n\n Returns:\n List[TableMatch]: list of table headers matches\n \"\"\"\n if len(search_headers) > len(document_headers):\n return []\n\n matches = []\n\n for s_header in search_headers:\n scores = [str_comparison(x, s_header) for x in document_headers]\n\n max_index = np.argmax(scores)\n max_score = np.max(scores)\n\n entry = TableMatch(\n search_term=s_header,\n original_term=document_headers[max_index],\n score=max_score,\n )\n\n matches.append(entry)\n\n document_headers.pop(max_index)\n\n return matches\n\n @staticmethod\n def get_columns_fuzzy(\n df: pd.DataFrame, columns: List[str], threshold=0\n ) -> pd.DataFrame:\n \"\"\"Get columns that hat the closest match with supplied columns names\n The columns will be renamed to match the closest column name\n\n Args:\n df (pd.DataFrame): dataframe to search columns\n columns (List[str]): columns to search\n threshold (int, optional): minimum score to consider a match. 
Defaults to 0.\n\n Returns:\n List[str]: columns that match\n \"\"\"\n association = Extractor.headers_association(df.columns.to_list(), columns)\n\n association = [x for x in association if x.score > threshold]\n\n original = [x.original_term for x in association]\n df = df[original]\n\n rename_dict = {x.original_term: x.search_term for x in association}\n df.rename(columns=rename_dict, inplace=True)\n\n return df\n\n\n@dataclass\nclass _SectionPath:\n nodes: List[BaseNode]\n score: float = 0\n\n def add_node(self, node: BaseNode, section_names: List[str]):\n name = section_names[len(self.nodes) - 1]\n self.score += str_comparison(name, node.title)\n\n\nclass TreeExtractor(Extractor):\n def __init__(self, doc_handler: TreeFileHandler):\n super().__init__(doc_handler)\n self.doc_handler = doc_handler\n\n def get_closest_section(self, titles: List[str]) -> BaseNode:\n \"\"\"Get the closest section given a list of section titles.\n To reach the closest section all possible paths will be explored, the best path\n is the one with the highest sum of title comparison scores.\n\n Args:\n titles (List[str]): list of section titles\n\n Returns:\n BaseNode: closest section\n \"\"\"\n\n initial_path = _SectionPath(nodes=[self.doc_handler.root], score=0)\n paths = deque([initial_path])\n valid_paths = []\n\n while paths:\n path = paths.popleft()\n for node in path.nodes:\n new_path = _SectionPath(nodes=path.nodes[:], score=path.score)\n new_path.add_node(node, titles)\n\n if len(new_path.nodes) == len(titles) + 1:\n valid_paths.append(new_path)\n else:\n paths.append(new_path)\n\n best_path = max(valid_paths, key=lambda x: x.score)\n return best_path\n"
] | [
[
"numpy.max",
"numpy.argmax",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ccyjw8860/deep-text-recognition-benchmark | [
"96f526e6d58e2d0685a5e062f472a3cb7310b8be"
] | [
"train_original.py"
] | [
"import os\nimport sys\nimport time\nimport random\nimport string\nimport argparse\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport torch.optim as optim\nimport torch.utils.data\nimport numpy as np\n\nfrom utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager\nfrom dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset\nfrom model import Model\nfrom test import validation\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef train(opt):\n \"\"\" dataset preparation \"\"\"\n if not opt.data_filtering_off:\n print('Filtering the images containing characters which are not in opt.character')\n print('Filtering the images whose label is longer than opt.batch_max_length')\n # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130\n\n opt.select_data = opt.select_data.split('-')\n opt.batch_ratio = opt.batch_ratio.split('-')\n train_dataset = Batch_Balanced_Dataset(opt)\n\n log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')\n AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)\n valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)\n valid_loader = torch.utils.data.DataLoader(\n valid_dataset, batch_size=opt.batch_size,\n shuffle=True, # 'True' to check training progress with validation function.\n num_workers=int(opt.workers),\n collate_fn=AlignCollate_valid, pin_memory=True)\n log.write(valid_dataset_log)\n print('-' * 80)\n log.write('-' * 80 + '\\n')\n log.close()\n\n \"\"\" model configuration \"\"\"\n if 'CTC' in opt.Prediction:\n if opt.baiduCTC:\n converter = CTCLabelConverterForBaiduWarpctc(opt.character)\n else:\n converter = CTCLabelConverter(opt.character)\n else:\n converter = AttnLabelConverter(opt.character)\n opt.num_class = len(converter.character)\n\n if opt.rgb:\n opt.input_channel = 3\n model = Model(opt)\n print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,\n opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,\n opt.SequenceModeling, opt.Prediction)\n\n # weight initialization\n for name, param in model.named_parameters():\n if 'localization_fc2' in name:\n print(f'Skip {name} as it is already initialized')\n continue\n try:\n if 'bias' in name:\n init.constant_(param, 0.0)\n elif 'weight' in name:\n init.kaiming_normal_(param)\n except Exception as e: # for batchnorm.\n if 'weight' in name:\n param.data.fill_(1)\n continue\n\n # data parallel for multi-GPU\n model = torch.nn.DataParallel(model).to(device)\n model.train()\n if opt.saved_model != '':\n print(f'loading pretrained model from {opt.saved_model}')\n if opt.FT:\n model.load_state_dict(torch.load(opt.saved_model), strict=False)\n else:\n model.load_state_dict(torch.load(opt.saved_model))\n print(\"Model:\")\n print(model)\n\n \"\"\" setup loss \"\"\"\n if 'CTC' in opt.Prediction:\n if opt.baiduCTC:\n # need to install warpctc. 
see our guideline.\n from warpctc_pytorch import CTCLoss\n criterion = CTCLoss()\n else:\n criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)\n else:\n criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device) # ignore [GO] token = ignore index 0\n # loss averager\n loss_avg = Averager()\n\n # filter that only require gradient decent\n filtered_parameters = []\n params_num = []\n for p in filter(lambda p: p.requires_grad, model.parameters()):\n filtered_parameters.append(p)\n params_num.append(np.prod(p.size()))\n print('Trainable params num : ', sum(params_num))\n # [print(name, p.numel()) for name, p in filter(lambda p: p[1].requires_grad, model.named_parameters())]\n\n # setup optimizer\n if opt.adam:\n optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))\n else:\n optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps)\n print(\"Optimizer:\")\n print(optimizer)\n\n \"\"\" final options \"\"\"\n # print(opt)\n with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:\n opt_log = '------------ Options -------------\\n'\n args = vars(opt)\n for k, v in args.items():\n opt_log += f'{str(k)}: {str(v)}\\n'\n opt_log += '---------------------------------------\\n'\n print(opt_log)\n opt_file.write(opt_log)\n\n \"\"\" start training \"\"\"\n start_iter = 0\n if opt.saved_model != '':\n try:\n start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])\n print(f'continue to train, start_iter: {start_iter}')\n except:\n pass\n\n start_time = time.time()\n best_accuracy = -1\n best_norm_ED = -1\n iteration = start_iter\n\n while (True):\n # train part\n image_tensors, labels = train_dataset.get_batch()\n image = image_tensors.to(device)\n text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)\n batch_size = image.size(0)\n\n if 'CTC' in opt.Prediction:\n preds = model(image, text)\n preds_size = torch.IntTensor([preds.size(1)] * batch_size)\n if opt.baiduCTC:\n preds = preds.permute(1, 0, 2) # to use CTCLoss format\n cost = criterion(preds, text, preds_size, length) / batch_size\n else:\n preds = preds.log_softmax(2).permute(1, 0, 2)\n cost = criterion(preds, text, preds_size, length)\n\n else:\n preds = model(image, text[:, :-1]) # align with Attention.forward\n target = text[:, 1:] # without [GO] Symbol\n cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))\n\n model.zero_grad()\n cost.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip) # gradient clipping with 5 (Default)\n optimizer.step()\n\n loss_avg.add(cost)\n\n # validation part\n if (\n iteration + 1) % opt.valInterval == 0 or iteration == 0: # To see training progress, we also conduct validation when 'iteration == 0'\n elapsed_time = time.time() - start_time\n # for log\n with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:\n model.eval()\n with torch.no_grad():\n valid_loss, current_accuracy, current_norm_ED, preds, confidence_score, labels, infer_time, length_of_data = validation(\n model, criterion, valid_loader, converter, opt)\n model.train()\n\n # training loss and validation loss\n loss_log = f'[{iteration + 1}/{opt.num_iter}] Train loss: {loss_avg.val():0.5f}, Valid loss: {valid_loss:0.5f}, Elapsed_time: {elapsed_time:0.5f}'\n loss_avg.reset()\n\n current_model_log = f'{\"Current_accuracy\":17s}: {current_accuracy:0.3f}, {\"Current_norm_ED\":17s}: {current_norm_ED:0.2f}'\n\n # keep best accuracy model (on valid dataset)\n if current_accuracy > 
best_accuracy:\n best_accuracy = current_accuracy\n torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_accuracy.pth')\n if current_norm_ED > best_norm_ED:\n best_norm_ED = current_norm_ED\n torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_norm_ED.pth')\n best_model_log = f'{\"Best_accuracy\":17s}: {best_accuracy:0.3f}, {\"Best_norm_ED\":17s}: {best_norm_ED:0.2f}'\n\n loss_model_log = f'{loss_log}\\n{current_model_log}\\n{best_model_log}'\n print(loss_model_log)\n log.write(loss_model_log + '\\n')\n\n # show some predicted results\n dashed_line = '-' * 80\n head = f'{\"Ground Truth\":25s} | {\"Prediction\":25s} | Confidence Score & T/F'\n predicted_result_log = f'{dashed_line}\\n{head}\\n{dashed_line}\\n'\n for gt, pred, confidence in zip(labels[:5], preds[:5], confidence_score[:5]):\n if 'Attn' in opt.Prediction:\n gt = gt[:gt.find('[s]')]\n pred = pred[:pred.find('[s]')]\n\n predicted_result_log += f'{gt:25s} | {pred:25s} | {confidence:0.4f}\\t{str(pred == gt)}\\n'\n predicted_result_log += f'{dashed_line}'\n print(predicted_result_log)\n log.write(predicted_result_log + '\\n')\n\n # save model per 1e+5 iter.\n if (iteration + 1) % 1e+5 == 0:\n torch.save(\n model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration + 1}.pth')\n\n if (iteration + 1) == opt.num_iter:\n print('end the training')\n sys.exit()\n iteration += 1\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp_name', help='Where to store logs and models')\n parser.add_argument('--train_data', required=True, help='path to training dataset', default=\"D:/data/data_lmdb_release/training\")\n parser.add_argument('--valid_data', required=True, help='path to validation dataset', default=\"D:/data/data_lmdb_release/validation\")\n parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')\n parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)\n parser.add_argument('--batch_size', type=int, default=192, help='input batch size')\n parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')\n parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation')\n parser.add_argument('--saved_model', default='', help=\"path to model to continue training\")\n parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')\n parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is Adadelta)')\n parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')\n parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')\n parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')\n parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')\n parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. 
default=5')\n parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode')\n \"\"\" Data processing \"\"\"\n parser.add_argument('--select_data', type=str, default='MJ-ST',\n help='select training data (default is MJ-ST, which means MJ and ST used as training data)')\n parser.add_argument('--batch_ratio', type=str, default='0.5-0.5',\n help='assign ratio for each selected data in the batch')\n parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',\n help='total data usage ratio, this ratio is multiplied to total number of data.')\n parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')\n parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')\n parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')\n parser.add_argument('--rgb', action='store_true', help='use rgb input')\n parser.add_argument('--character', type=str,\n default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')\n parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')\n parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')\n parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')\n \"\"\" Model Architecture \"\"\"\n parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')\n parser.add_argument('--FeatureExtraction', type=str, required=True,\n help='FeatureExtraction stage. VGG|RCNN|ResNet')\n parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')\n parser.add_argument('--Prediction', type=str, required=True, help='Prediction stage. 
CTC|Attn')\n parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')\n parser.add_argument('--input_channel', type=int, default=1,\n help='the number of input channel of Feature extractor')\n parser.add_argument('--output_channel', type=int, default=512,\n help='the number of output channel of Feature extractor')\n parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')\n\n opt = parser.parse_args()\n\n if not opt.exp_name:\n opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-{opt.Prediction}'\n opt.exp_name += f'-Seed{opt.manualSeed}'\n # print(opt.exp_name)\n\n os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)\n\n \"\"\" vocab / character number configuration \"\"\"\n if opt.sensitive:\n # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).\n\n \"\"\" Seed and GPU setting \"\"\"\n # print(\"Random Seed: \", opt.manualSeed)\n random.seed(opt.manualSeed)\n np.random.seed(opt.manualSeed)\n torch.manual_seed(opt.manualSeed)\n torch.cuda.manual_seed(opt.manualSeed)\n\n cudnn.benchmark = True\n cudnn.deterministic = True\n opt.num_gpu = torch.cuda.device_count()\n # print('device count', opt.num_gpu)\n if opt.num_gpu > 1:\n print('------ Use multi-GPU setting ------')\n print('if you stuck too long time with multi-GPU setting, try to set --workers 0')\n # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1\n opt.workers = opt.workers * opt.num_gpu\n opt.batch_size = opt.batch_size * opt.num_gpu\n\n \"\"\" previous version\n print('To equlize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)\n opt.batch_size = opt.batch_size * opt.num_gpu\n print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')\n If you dont care about it, just commnet out these line.)\n opt.num_iter = int(opt.num_iter / opt.num_gpu)\n \"\"\"\n\n train(opt)"
] | [
[
"torch.optim.Adam",
"torch.nn.CrossEntropyLoss",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.nn.init.constant_",
"torch.manual_seed",
"torch.load",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.nn.CTCLoss",
"torch.cuda.is_available",
"torch.cuda.device_count",
"torch.optim.Adadelta",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Seondong/talkingdata_kaggle_201608 | [
"b9ddbb343dacbcfdaa4b2732c9ea23bf776c773b"
] | [
"code/xgboost_baseline.py"
] | [
"__author__ = 'ZFTurbo: https://kaggle.com/zfturbo'\n\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\nimport xgboost as xgb\nimport random\nimport zipfile\nimport time\nimport shutil\nfrom sklearn.metrics import log_loss\n\nrandom.seed(2016)\n\ndef run_xgb(train, test, features, target, random_state=0):\n eta = 0.1\n max_depth = 3\n subsample = 0.7\n colsample_bytree = 0.7\n start_time = time.time()\n\n print('XGBoost params. ETA: {}, MAX_DEPTH: {}, SUBSAMPLE: {}, COLSAMPLE_BY_TREE: {}'.format(eta, max_depth, subsample, colsample_bytree))\n params = {\n \"objective\": \"multi:softprob\",\n \"num_class\": 12,\n \"booster\" : \"gbtree\",\n \"eval_metric\": \"mlogloss\",\n \"eta\": eta,\n \"max_depth\": max_depth,\n \"subsample\": subsample,\n \"colsample_bytree\": colsample_bytree,\n \"silent\": 1,\n \"seed\": random_state,\n }\n num_boost_round = 500\n early_stopping_rounds = 50\n test_size = 0.3\n\n X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)\n print('Length train:', len(X_train.index))\n print('Length valid:', len(X_valid.index))\n y_train = X_train[target]\n y_valid = X_valid[target]\n dtrain = xgb.DMatrix(X_train[features], y_train)\n dvalid = xgb.DMatrix(X_valid[features], y_valid)\n\n watchlist = [(dtrain, 'train'), (dvalid, 'eval')]\n gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=True)\n\n print(\"Validating...\")\n check = gbm.predict(xgb.DMatrix(X_valid[features]), ntree_limit=gbm.best_iteration)\n score = log_loss(y_valid.tolist(), check)\n\n print(\"Predict test set...\")\n test_prediction = gbm.predict(xgb.DMatrix(test[features]), ntree_limit=gbm.best_iteration)\n\n print('Training time: {} minutes'.format(round((time.time() - start_time)/60, 2)))\n return test_prediction.tolist(), score\n\n\ndef create_submission(score, test, prediction):\n # Make Submission\n now = datetime.datetime.now()\n sub_file = 'submission_' + str(score) + '_' + str(now.strftime(\"%Y-%m-%d-%H-%M\")) + '.csv'\n print('Writing submission: ', sub_file)\n f = open(sub_file, 'w')\n f.write('device_id,F23-,F24-26,F27-28,F29-32,F33-42,F43+,M22-,M23-26,M27-28,M29-31,M32-38,M39+\\n')\n total = 0\n test_val = test['device_id'].values\n for i in range(len(test_val)):\n str1 = str(test_val[i])\n for j in range(12):\n str1 += ',' + str(prediction[i][j])\n str1 += '\\n'\n total += 1\n f.write(str1)\n f.close()\n\n\ndef map_column(table, f):\n labels = sorted(table[f].unique())\n mappings = dict()\n for i in range(len(labels)):\n mappings[labels[i]] = i\n table = table.replace({f: mappings})\n return table\n\n\ndef read_train_test():\n # Events\n print('Read events...')\n events = pd.read_csv(\"../input/events.csv\", dtype={'device_id': np.str})\n events['counts'] = events.groupby(['device_id'])['event_id'].transform('count')\n events_small = events[['device_id', 'counts']].drop_duplicates('device_id', keep='first')\n\n # Phone brand\n print('Read brands...')\n pbd = pd.read_csv(\"../input/phone_brand_device_model.csv\", dtype={'device_id': np.str})\n pbd.drop_duplicates('device_id', keep='first', inplace=True)\n pbd = map_column(pbd, 'phone_brand')\n pbd = map_column(pbd, 'device_model')\n\n # Train\n print('Read train...')\n train = pd.read_csv(\"../input/gender_age_train.csv\", dtype={'device_id': np.str})\n train = map_column(train, 'group')\n train = train.drop(['age'], axis=1)\n train = train.drop(['gender'], axis=1)\n train 
= pd.merge(train, pbd, how='left', on='device_id', left_index=True)\n train = pd.merge(train, events_small, how='left', on='device_id', left_index=True)\n train.fillna(-1, inplace=True)\n\n # Test\n print('Read test...')\n test = pd.read_csv(\"../input/gender_age_test.csv\", dtype={'device_id': np.str})\n test = pd.merge(test, pbd, how='left', on='device_id', left_index=True)\n test = pd.merge(test, events_small, how='left', on='device_id', left_index=True)\n test.fillna(-1, inplace=True)\n\n # Features\n features = list(test.columns.values)\n features.remove('device_id')\n\n return train, test, features\n\n\ntrain, test, features = read_train_test()\nprint('Length of train: ', len(train))\nprint('Length of test: ', len(test))\nprint('Features [{}]: {}'.format(len(features), sorted(features)))\ntest_prediction, score = run_xgb(train, test, features, 'group')\nprint(\"LS: {}\".format(round(score, 5)))\ncreate_submission(score, test, test_prediction)"
] | [
[
"pandas.merge",
"sklearn.cross_validation.train_test_split",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
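Note: the entry above trains a 12-class XGBoost model with early stopping and scores it with multiclass log-loss. It imports `sklearn.cross_validation`, which was removed from scikit-learn long ago; a minimal runnable sketch of the same pattern, on synthetic data and using the modern `sklearn.model_selection` import, might look like this (the data and shapes here are illustrative assumptions, not the competition's):

```python
# Minimal sketch (not the dump's script): multiclass XGBoost with early
# stopping on synthetic data, mirroring the entry's params.
import numpy as np
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss

rng = np.random.RandomState(0)
X = rng.rand(500, 10)
y = rng.randint(0, 12, size=500)          # 12 classes, as in the entry

X_tr, X_va, y_tr, y_va = train_test_split(X, y, test_size=0.3, random_state=0)
dtrain = xgb.DMatrix(X_tr, label=y_tr)
dvalid = xgb.DMatrix(X_va, label=y_va)

params = {"objective": "multi:softprob", "num_class": 12,
          "eta": 0.1, "max_depth": 3, "eval_metric": "mlogloss"}
gbm = xgb.train(params, dtrain, num_boost_round=500,
                evals=[(dtrain, "train"), (dvalid, "eval")],
                early_stopping_rounds=50, verbose_eval=False)

pred = gbm.predict(dvalid)                # (n_valid, 12) class probabilities
print("validation log-loss:", log_loss(y_va, pred, labels=np.arange(12)))
```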
shengyushen/training | [
"0db663b86001dfc359da98c1504a7a3cb8e1f617"
] | [
"mnist/mnist_with_summaries_bf16.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A simple MNIST classifier which displays summaries in TensorBoard.\n\nThis is an unimpressive MNIST model, but it is a good example of using\ntf.name_scope to make a graph legible in the TensorBoard graph explorer, and of\nnaming summary tags so that they are grouped meaningfully in TensorBoard.\n\nIt demonstrates the functionality of every TensorBoard dashboard.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport sys\n\nimport tensorflow as tf\n#from tensorflow.python.lib.io import file_io\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nfrom pgrad import *\nFLAGS = None\n\n\ndef train():\n # Import data\n mnist = input_data.read_data_sets(FLAGS.data_dir,\n fake_data=FLAGS.fake_data)\n\n #sess = tf.InteractiveSession()\n sess = tf.InteractiveSession()\n # Create a multilayer model.\n\n # Input placeholders\n with tf.name_scope('input'):\n x = tf.placeholder(tf.float32, [None, 784], name='x-input')\n y_ = tf.placeholder(tf.int64, [None], name='y-input')\n\n with tf.name_scope('input_reshape'):\n image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])\n tf.summary.image('input', image_shaped_input, 10)\n\n # We can't initialize these variables to 0 - the network will get stuck.\n def weight_variable(shape):\n \"\"\"Create a weight variable with appropriate initialization.\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n def bias_variable(shape):\n \"\"\"Create a bias variable with appropriate initialization.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n def variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(input_tensor=var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(input_tensor=tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(input_tensor=var))\n tf.summary.scalar('min', tf.reduce_min(input_tensor=var))\n tf.summary.histogram('histogram', var)\n\n def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):\n \"\"\"Reusable code for making a simple neural net layer.\n\n It does a matrix multiply, bias add, and then uses ReLU to nonlinearize.\n It also sets up name scoping so that the resultant graph is easy to read,\n and adds a number of summary ops.\n \"\"\"\n # Adding a name scope ensures logical grouping of the layers in the graph.\n with tf.name_scope(layer_name):\n # This Variable will hold the state of the weights for the layer\n with tf.name_scope('weights'):\n weights = weight_variable([input_dim, output_dim])\n variable_summaries(weights)\n with 
tf.name_scope('biases'):\n biases = bias_variable([output_dim])\n variable_summaries(biases)\n with tf.name_scope('Wx_plus_b'):\n # SSY /usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/math_ops.py\n input_tensor = id_bf16cut_fp(input_tensor)\n weights = id_bf16cut_fp(weights)\n preactivate = tf.matmul(input_tensor, weights) + biases\n preactivate=id_bf16cut_bp(preactivate)\n tf.summary.histogram('pre_activations', preactivate)\n activations = act(preactivate, name='activation')\n tf.summary.histogram('activations', activations)\n return activations\n\n hidden1 = nn_layer(x, 784, 500, 'layer1')\n\n with tf.name_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32)\n tf.summary.scalar('dropout_keep_probability', keep_prob)\n #dropped = tf.nn.dropout(hidden1, rate=(1 - keep_prob))\n dropped = tf.nn.dropout(hidden1, keep_prob=keep_prob)\n\n # Do not apply softmax activation yet, see below.\n y = nn_layer(dropped, 500, 10, 'layer2', act=tf.identity)\n\n with tf.name_scope('cross_entropy'):\n # The raw formulation of cross-entropy,\n #\n # tf.reduce_mean(-tf.reduce_sum(y_ * tf.math.log(tf.softmax(y)),\n # reduction_indices=[1]))\n #\n # can be numerically unstable.\n #\n # So here we use tf.losses.sparse_softmax_cross_entropy on the\n # raw logit outputs of the nn_layer above, and then average across\n # the batch.\n with tf.name_scope('total'):\n cross_entropy = tf.losses.sparse_softmax_cross_entropy(\n labels=y_, logits=y)\n tf.summary.scalar('cross_entropy', cross_entropy)\n\n with tf.name_scope('train'):\n train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(\n cross_entropy)\n\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(input=y, axis=1), y_)\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(input_tensor=tf.cast(correct_prediction,\n tf.float32))\n tf.summary.scalar('accuracy', accuracy)\n\n # Merge all the summaries and write them out to\n # /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default)\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train',\n sess.graph)\n test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')\n tf.global_variables_initializer().run()\n\n # Train the model, and also write summaries.\n # Every 10th step, measure test-set accuracy, and write test summaries\n # All other steps, run train_step on training data, & add training summaries\n\n def feed_dict(train):\n \"\"\"Make a TensorFlow feed_dict: maps data onto Tensor placeholders.\"\"\"\n if train or FLAGS.fake_data:\n xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)\n k = FLAGS.dropout\n else:\n xs, ys = mnist.test.images, mnist.test.labels\n k = 1.0\n return {x: xs, y_: ys, keep_prob: k}\n\n for i in range(FLAGS.max_steps):\n if i % 10 == 0: # Record summaries and test-set accuracy\n summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))\n test_writer.add_summary(summary, i)\n print('Accuracy at step %s: %s' % (i, acc))\n else: # Record train set summaries, and train\n if i % 100 == 99: # Record execution stats\n run_options = tf.RunOptions(\n trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n summary, _ = sess.run([merged, train_step],\n feed_dict=feed_dict(True),\n options=run_options,\n run_metadata=run_metadata)\n train_writer.add_run_metadata(run_metadata, 'step%03d' % i)\n train_writer.add_summary(summary, i)\n print('Adding run metadata for', i)\n else: # Record a summary\n summary, 
_ = sess.run([merged, train_step], feed_dict=feed_dict(True))\n train_writer.add_summary(summary, i)\n train_writer.close()\n test_writer.close()\n\n\ndef main(_):\n if tf.gfile.Exists(FLAGS.log_dir):\n tf.gfile.DeleteRecursively(FLAGS.log_dir)\n tf.gfile.MakeDirs(FLAGS.log_dir)\n with tf.Graph().as_default():\n train()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--fake_data', nargs='?', const=True, type=bool,\n default=False,\n help='If true, uses fake data for unit testing.')\n parser.add_argument('--max_steps', type=int, default=1000,\n help='Number of steps to run trainer.')\n parser.add_argument('--learning_rate', type=float, default=0.001,\n help='Initial learning rate')\n parser.add_argument('--dropout', type=float, default=0.9,\n help='Keep probability for training dropout.')\n parser.add_argument(\n '--data_dir',\n type=str,\n default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),\n 'tensorflow/mnist/input_data'),\n help='Directory for storing input data')\n parser.add_argument(\n '--log_dir',\n type=str,\n default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),\n 'tensorflow/mnist/logs/mnist_with_summaries'),\n help='Summaries log directory')\n FLAGS, unparsed = parser.parse_known_args()\n # I am using tf 1.18 so I dont need compat v1\n #tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"
] | [
[
"tensorflow.gfile.DeleteRecursively",
"tensorflow.gfile.Exists",
"tensorflow.RunMetadata",
"tensorflow.cast",
"tensorflow.gfile.MakeDirs",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.Graph",
"tensorflow.Variable",
"tensorflow.summary.image",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.nn.dropout",
"tensorflow.app.run",
"tensorflow.matmul",
"tensorflow.truncated_normal",
"tensorflow.InteractiveSession",
"tensorflow.losses.sparse_softmax_cross_entropy",
"tensorflow.RunOptions",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.summary.histogram",
"tensorflow.reduce_max",
"tensorflow.summary.FileWriter",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.reduce_min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
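Note: the distinctive part of the entry above is wrapping the matmul inputs with `id_bf16cut_fp`/`id_bf16cut_bp` from a local `pgrad` module that is not included in the dump. A plausible emulation of the forward-pass "cut" is truncating float32 values to bfloat16 precision by zeroing the low 16 mantissa bits; this NumPy sketch is an assumption about what those helpers do, not their actual implementation:

```python
# Hypothetical bfloat16 truncation: keep the top 16 bits of a float32
# (sign + exponent + 7 mantissa bits), zero the rest.
import numpy as np

def bf16_truncate(x):
    x = np.ascontiguousarray(x, dtype=np.float32)
    bits = x.view(np.uint32)
    return (bits & np.uint32(0xFFFF0000)).view(np.float32)

w = np.array([0.1, -3.14159, 1e-8], dtype=np.float32)
print(bf16_truncate(w))   # magnitudes truncated to an 8-bit mantissa
```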
laket/ape-x | [
"8ccb4192206d9529b5105e9fffd3cff143f48864"
] | [
"replay_buffer_actor.py"
] | [
"'''\nCopyright (c) 2018 Uber Technologies, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\nimport tensorflow as tf\nimport numpy as np\n\nimport models\nfrom ops.segment_tree import ShortTermBuffer\n\nfrom gym_tensorflow.wrappers.base import BaseWrapper\n\ndef make_masked_frame(frames, dones, data_format):\n \"\"\"doneなframesは0、それ以外はもとの値を持つTensor群を返す\n\n :param list[tf.Tensor] frames: ここのTensorはNCHWっぽい (data_formatに従う)\n :param tuple[tf.Tensor] dones: ここのTensorはframes[i]の0次元目の長さと一致するbool\n :param data_format:\n :return:\n \"\"\"\n frames = list(frames[:])\n mask = None\n # donesを反転して次元を後ろに4つつける (4,) => (4,1,1,1)\n not_dones = [tf.cast(tf.logical_not(d), frames[0].dtype) if d is not None else None for d in dones]\n not_dones = [tf.expand_dims(d, axis=-1) if d is not None else None for d in not_dones]\n not_dones = [tf.expand_dims(d, axis=-1) if d is not None else None for d in not_dones]\n not_dones = [tf.expand_dims(d, axis=-1) if d is not None else None for d in not_dones]\n for i in np.flip(np.arange(len(frames) - 1), 0):\n if mask is None:\n mask = not_dones[i]\n else:\n mask = mask * not_dones[i]\n frames[i] = tf.image.convert_image_dtype(frames[i] * mask, tf.float32)\n frames[-1] = tf.image.convert_image_dtype(frames[-1], tf.float32)\n if data_format == 'NHWC':\n return tf.concat(frames, axis=-1, name='concat_masked_frames')\n elif data_format == 'NCHW':\n return tf.concat(frames, axis=-3, name='concat_masked_frames')\n else:\n raise NotImplementedError()\n\n\nclass ReplayBufferWrapper(BaseWrapper):\n \"\"\"行動をBufferに蓄積する環境\n\n BaseWrapperは環境用のクラス\n\n 利用例 (Prioritizedはこのクラスを継承している)\n PrioritizedReplayBufferWrapper(envs[actor_num], actor_num, actor_fifo, framestack, data_format, multi_step_n=multi_step_n)\n\n \"\"\"\n\n def __init__(self, env, actor_num, queue, num_stacked_frames, data_format):\n \"\"\"\n\n :param gym_tensorflow.atari.tf_atari.AtariEnv env: step等の関数を持つ環境 (AtariEnvとかくる)\n :param actor_num:\n :param tf.FIFOQueue queue:\n :param int num_stacked_frames: おそらく状態として何フレームを一括として扱うか\n :param data_format:\n \"\"\"\n super(ReplayBufferWrapper, self).__init__(env)\n self.queue = queue\n self.actor_num = actor_num\n self.num_stacked_frames = num_stacked_frames\n self.data_format = data_format\n\n with tf.device('/cpu:0'):\n if data_format == 'NCHW':\n obs_space = env.observation_space[0], env.observation_space[-1], env.observation_space[1], env.observation_space[2]\n else:\n obs_space = env.observation_space\n # 常にnum_stacked_framesをトラックする\n self.buffer = ShortTermBuffer(shapes=[obs_space, (env.batch_size,)], dtypes=[tf.uint8, tf.bool],\n framestack=num_stacked_frames, multi_step=0)\n\n @property\n def observation_space(self):\n return self.env.observation_space[:-1] + (self.env.observation_space[-1] * self.num_stacked_frames, )\n\n def observation(self, indices=None, reset=False, name=None):\n \"\"\"現在のstateを返す。ただし、num_stacked_frames分拡張されたobservationを返す\n\n :param indices: batchの中で一部のものをtrackしている場合かな? 
(どこで使っている?)\n :param reset: 未使用\n :param name: 未使用\n :return:\n \"\"\"\n assert indices is None\n obs = self.env.observation(indices)\n if self.data_format == 'NCHW':\n obs = tf.transpose(obs, (0, 3, 1, 2))\n\n with tf.device('/cpu:0'):\n _, recent_obs_done = self.buffer.encode_history()\n\n observations, dones=zip( * recent_obs_done[1 - self.num_stacked_frames:])\n observations += (obs,)\n dones += (None,)\n\n return make_masked_frame(observations, dones, self.data_format)\n\n def step(self, action, indices=None, name=None):\n assert indices is None\n sliced_act_obs = self.env.observation(indices)\n if self.data_format == 'NCHW':\n sliced_act_obs = tf.transpose(sliced_act_obs, (0, 3, 1, 2))\n\n sliced_act_obs = tf.image.convert_image_dtype(sliced_act_obs, tf.uint8)\n assert sliced_act_obs.dtype == tf.uint8\n\n with tf.device('/cpu:0'):\n _, recent_obs_done = self.buffer.encode_history()\n\n observations, dones=zip( * recent_obs_done[1 - self.num_stacked_frames:])\n observations += (sliced_act_obs,)\n dones += (None,)\n\n # 直近の4フレームをstateとしてまとめる\n obs = make_masked_frame(observations, dones, self.data_format)\n with tf.control_dependencies([sliced_act_obs]):\n # 1stepすすめる\n rew, done = self.env.step(action=action, indices=indices, name=name)\n # (入力画像, 完了済み)のペアをShortTermBufferに入れる\n # 遷移後のstateは次のstepなりobservationなりで取る思想っぽい\n update_recent_history = self.buffer.enqueue([sliced_act_obs, done])\n\n # 観測列をReplayBufferに入れる\n enqueue_op = self.queue.enqueue([obs, sliced_act_obs, rew, done, action, self.actor_num])\n\n with tf.control_dependencies([update_recent_history[0].op, enqueue_op]):\n return tf.identity(rew), tf.identity(done)\n\n\nclass PrioritizedReplayBufferWrapper(ReplayBufferWrapper):\n \"\"\"ReplayBuffer (Ape-X 所属)\n\n 呼び出し例\n PrioritizedReplayBufferWrapper(envs[actor_num], actor_num, actor_fifo, framestack, data_format, multi_step_n=multi_step_n)\n\n \"\"\"\n\n def __init__(self, *args, multi_step_n=None, **kwargs):\n super(PrioritizedReplayBufferWrapper, self).__init__(*args, **kwargs)\n self.transition_buffer = None\n self.multi_step_n = multi_step_n\n\n @classmethod\n def get_buffer_dtypes(cls, multi_step_n, framestack):\n return [tf.uint8, tf.float32, tf.bool, tf.int32, tf.float32, tf.float32] * (multi_step_n + framestack)\n\n @classmethod\n def get_buffer_shapes(cls, env, multi_step_n, num_stacked_frames, data_format):\n b = (env.batch_size,)\n if data_format == 'NCHW':\n obs_space = env.observation_space[-1], env.observation_space[1], env.observation_space[2]\n else:\n obs_space = env.observation_space[1:]\n shapes = [\n obs_space, # Image\n (), # Reward\n (), # Done\n (), # Action\n (env.action_space,), # Q Values\n (), # Selected Q Value\n ]\n shapes = [b + s for s in shapes]\n return shapes * (multi_step_n + num_stacked_frames)\n\n def step(self, action, indices=None, name=None, q_values=None, q_t_selected=None):\n \"\"\"環境を1stepすすめる\n\n 呼び出し例\n env.step(output_actions, q_values=q_values, q_t_selected=q_t_selected)\n\n\n :param tf.Tensor action: 選んだアクション [batch_size]\n :param indices:\n :param name:\n :param tf.Tensor q_values: 各アクションのQ(s,a) [batch_size, num_actions]\n :param tf.Tensor q_t_selected: 選んだアクションの評価値 [batch_size]\n :return:\n \"\"\"\n\n assert indices is None\n assert q_values is not None\n assert q_t_selected is not None\n batch_size = self.env.batch_size\n # NHWCの画像がとれる\n sliced_act_obs = self.env.observation(indices)\n if self.data_format == 'NCHW':\n sliced_act_obs = tf.transpose(sliced_act_obs, (0, 3, 1, 2))\n\n sliced_act_obs = 
tf.image.convert_image_dtype(sliced_act_obs, tf.uint8)\n assert sliced_act_obs.dtype == tf.uint8\n\n with tf.device('/cpu:0'):\n _, recent_obs_done = self.buffer.encode_history()\n\n # 最後のnum_stacked_frames-1分だけrecent_obs_doneからとってくる\n observations, dones=zip( * recent_obs_done[1 - self.num_stacked_frames:])\n # 最新の観測を足す Invadorだと(4,1,84,84)が4つのlist\n observations += (sliced_act_obs,)\n # (4,)のboolが4つのlist\n dones += (None,)\n\n obs = make_masked_frame(observations, dones, self.data_format)\n with tf.control_dependencies([sliced_act_obs]):\n rew, done = self.env.step(action=action, indices=indices, name=name)\n update_recent_history = self.buffer.enqueue([sliced_act_obs, done])\n\n # (action前状態, 報酬, 終わったかどうか, 選択したアクション, Q[batch_size,num_action], 選んだアクションの価値[batch_size])\n current_frame = sliced_act_obs, rew, done, action, q_values, q_t_selected\n if self.transition_buffer is None:\n with tf.control_dependencies(None):\n with tf.device('/cpu:0'):\n self.transition_buffer = ShortTermBuffer(shapes=[v.get_shape() for v in current_frame], dtypes=[v.dtype for v in current_frame], framestack=self.num_stacked_frames, multi_step=self.multi_step_n)\n\n # ShortTermBufferに現在の状態を足す\n # historyにはnum_stacked_frame+multi-step分のcurrent_frame列が入る\n is_valid, history = self.transition_buffer.enqueue(current_frame)\n\n history = [e for t in history for e in t]\n replay_queue_shapes = [(None,) + tuple(a.get_shape()[1:]) for a in history]\n\n enqueue_op = tf.cond(is_valid, lambda: self.queue.enqueue(history), tf.no_op)\n\n with tf.control_dependencies([enqueue_op, update_recent_history[0].op]):\n return tf.identity(rew), tf.identity(done)\n"
] | [
[
"tensorflow.device",
"tensorflow.transpose",
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.identity",
"tensorflow.expand_dims",
"tensorflow.image.convert_image_dtype",
"tensorflow.logical_not"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
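Note: `make_masked_frame` in the entry above zeroes every stacked frame that lies before an episode boundary, so a stacked state never mixes two episodes. A minimal NumPy sketch of that masking rule (batch and image dtype details omitted; the frame contents are dummies):

```python
# Frames older than a done=True boundary are zeroed; the newest frame
# is never masked, matching make_masked_frame's backward product of
# not_dones.
import numpy as np

def mask_stack(frames, dones):
    # frames: K arrays, oldest first; dones[i] is True if an episode
    # ended right after frames[i].
    frames = [f.astype(float) for f in frames]
    mask = 1.0
    for i in range(len(frames) - 2, -1, -1):   # walk back from newest-1
        mask *= 0.0 if dones[i] else 1.0
        frames[i] = frames[i] * mask
    return np.stack(frames)

stack = mask_stack([np.full((2, 2), k + 1.0) for k in range(4)],
                   dones=[False, True, False, False])
print(stack[:, 0, 0])   # [0. 0. 3. 4.]: frames before the done are zeroed
```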
wahyutirta/CNN-numpy | [
"d66e10a53304a0c72c40f278486866493f573d5e"
] | [
"main-1.1.py"
] | [
"from PyQt5.QtWidgets import *\nimport sys,pickle\nimport os\n\nfrom PyQt5 import uic, QtWidgets ,QtCore, QtGui\nfrom PyQt5 import QtWidgets, uic\nfrom PyQt5.QtCore import QDir, Qt, QSortFilterProxyModel\nfrom PyQt5.QtWidgets import QDialog ,QApplication, QFileDialog, QWidget, QTextEdit, QLabel\nfrom PyQt5.uic import loadUi\nfrom PyQt5.QtGui import QStandardItemModel, QStandardItem\nfrom PyQt5.QtGui import QImage\nimport cv2, imutils\nfrom einops import rearrange, reduce, repeat\nfrom lenet5 import *\nimport numpy as np\n\nimport matplotlib as plt\nplt.use('Qt5Agg')\n#matplotlib.use('Qt5Agg')\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport matplotlib.ticker as ticker\n\n\nmain_path = os.path.dirname(os.path.abspath(__file__)) #file path main.py\nwork_path = os.path.split(main_path) #path working folder (whole file project)\nui_folder = os.path.join(main_path,\"ui/\") #ui_folder path\n\n\nclass MplCanvas(FigureCanvas):\n def __init__(self, parent=None, width=4, height=7, dpi=100):\n self.fig = Figure(figsize=(width, height), dpi=dpi)\n \n \n super(MplCanvas, self).__init__(self.fig)\n #self.fig.tight_layout()\n \n\t\t\n\n\nclass error_window(QMainWindow):\n def __init__(self):\n super(error_window, self).__init__()\n\n\nclass App(QMainWindow):\n def __init__(self):\n super(App, self).__init__()\n\n self.ui = uic.loadUi(os.path.join(ui_folder,\"main2.ui\"), self)\n \n self.filePath = None\n self.methods = [\"adam\", \"rmsprop\"]\n self.learningRate = [\"0.001\", \"0.0001\"]\n self.batch = [\"32\"]\n self.epochs = [\"101\", \"151\", \"201\"]\n \n self.output = None\n \n self.optimizerCombo.addItems(self.methods)\n self.learningRateCombo.addItems(self.learningRate)\n self.epochsCombo.addItems(self.epochs)\n self.batchCombo.addItems(self.batch)\n \n self.lenet = None\n if self.lenet == None:\n self.modelLabel.setText(\"No Model\")\n \n self.openImageBtn.clicked.connect(self.browseImage)\n self.loadModelBtn.clicked.connect(self.browseModel)\n self.recogImageBtn.clicked.connect(self.predictImage)\n imagePath = \"data_jepun\"\n self.data = Data(main_path, imagePath)\n self.label = self.data.loadLabel()\n \n self.optimizerCombo.currentIndexChanged.connect(self.resetModel)\n self.learningRateCombo.currentIndexChanged.connect(self.resetModel)\n self.epochsCombo.currentIndexChanged.connect(self.resetModel)\n self.batchCombo.currentIndexChanged.connect(self.resetModel)\n \n \n def resetModel(self):\n self.lenet = None\n \n if self.lenet == None:\n self.output = self.modelLabel.setText(\"No Model\")\n print(\"model null\")\n \n\n def browseImage(self):\n self.filePath = QFileDialog.getOpenFileName(filter=\"Image (*.*)\")[0]\n _, self.fname = os.path.split(self.filePath)\n self.textFname.setText(self.fname)\n print(self.filePath) \n self.image = cv2.imread(self.filePath)\n self.setPhoto(self.image)\n \n #clear canvas\n self.canvas1 = MplCanvas(self, width=4, height=6, dpi=100)\n self.ui.gridLayout_4.addWidget(self.canvas1, 1, 6, 1, 1)\n self.canvas1.fig.clf()\n \n self.canvas2 = MplCanvas(self, width=4, height=6, dpi=100)\n self.ui.gridLayout_4.addWidget(self.canvas2, 1, 7, 1, 1)\n self.canvas2.fig.clf()\n\n\n \n def setPhoto(self,image):\n \"\"\" This function will take image input and resize it \n\t\t\tonly for display purpose and convert it to QImage\n\t\t\tto set at the label.\n\t\t\"\"\"\n self.tmp = image\n image = imutils.resize(image,width=300)\n frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = QImage(frame, 
frame.shape[1],frame.shape[0],frame.strides[0],QImage.Format_RGB888)\n self.imageSpace.setPixmap(QtGui.QPixmap.fromImage(image))\n \n \n def browseModel(self):\n \n method = self.optimizerCombo.currentText()\n learningRate = self.learningRateCombo.currentText()\n epochs = self.epochsCombo.currentText()\n batch = self.batchCombo.currentText()\n print(method, learningRate, epochs, batch)\n self.lenet = LENET5( method = method, epochs = epochs, batch = batch, learningRate = learningRate) \n \n self.lenet.load_parameters(mainPath=main_path,epochs=epochs,method=method, batch=batch, learningRate=learningRate)\n if self.lenet != None:\n self.output = self.modelLabel.setText(\"Model Loaded\")\n \n def predictImage(self):\n self.output = self.lenet.one_image(self.lenet.layers, self.filePath)\n\n indeks = np.argmax(self.output)\n\n self.predLabel.setText(self.label[indeks])\n pribability = str(self.output[0,indeks] * 100)\n self.probLabel.setText(str(pribability + \"%\"))\n \n features1 = self.lenet.displayFeature(self.lenet.layers, self.filePath, 1)\n features1 = features1.astype(np.uint8)\n self.features1 = features1\n \n features2 = self.lenet.displayFeature(self.lenet.layers, self.filePath, 2)\n features2 = features2.astype(np.uint8)\n self.canvasManager(features1,features2)\n \n def canvasManager(self,features1, features2):\n \n self.canvas1 = MplCanvas(self, width=4, height=6, dpi=100)\n self.ui.gridLayout_4.addWidget(self.canvas1, 1, 6, 1, 1)\n App.plot(self.canvas1,features1)\n \n self.canvas2 = MplCanvas(self, width=4, height=6, dpi=100)\n self.ui.gridLayout_4.addWidget(self.canvas2, 1, 7, 1, 1)\n App.plot(self.canvas2,features2)\n\n \"\"\"\n rows = 3\n columns = 2\n counter = 1\n print(features.shape)\n for feature in features:\n \n print(feature)\n title = str(\"feature \" + str(counter))\n self.canvas.axes = self.canvas.fig.add_subplot(rows, columns, counter)\n \n \n self.canvas.axes.imshow(feature)\n self.canvas.axes.axis(\"off\")\n self.canvas.axes.set_title(title)\n counter += 1\n \n self.canvas.draw()\n \"\"\"\n @staticmethod\n def plot(canvas,features):\n\n rows = 3\n columns = 2\n counter = 1\n print(features.shape)\n for feature in features:\n \n print(feature)\n title = str(\"feature \" + str(counter))\n canvas.axes = canvas.fig.add_subplot(rows, columns, counter)\n \n \n canvas.axes.imshow(feature)\n canvas.axes.axis(\"off\")\n canvas.axes.set_title(title)\n counter += 1\n \n canvas.draw()\n\n\napp = QtWidgets.QApplication(sys.argv)\nwindow = App()\nwidget = QtWidgets.QStackedWidget()\nwidget.addWidget(window)\nwidget.setFixedWidth(1070)\nwidget.setFixedHeight(660)\nwidget.show()\napp.exec_()\n#sys.exit( app.exec_() )\n\n\n"
] | [
[
"matplotlib.use",
"numpy.argmax",
"matplotlib.figure.Figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
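Note: the `setPhoto` method in the entry above converts an OpenCV BGR array to a `QImage`, passing the row stride so padded rows do not skew the image. A small sketch of that conversion; the helper name `to_qimage` is mine, and the `.copy()` is a defensive addition because a `QImage` built over a NumPy buffer does not own the memory:

```python
import numpy as np
import cv2
from PyQt5.QtGui import QImage

def to_qimage(bgr):
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    h, w, _ = rgb.shape
    # strides[0] is bytes per row (the bytesPerLine argument)
    qimg = QImage(rgb.data, w, h, rgb.strides[0], QImage.Format_RGB888)
    return qimg.copy()  # detach from the NumPy buffer before rgb is freed

img = to_qimage(np.zeros((60, 80, 3), dtype=np.uint8))
print(img.width(), img.height())   # 80 60
```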
huskermiao/MaizeLeafCounting | [
"68d3d8e8bebf2dc74f2aa79a3fc62aca67de1dbb"
] | [
"CountingByDetection_FasterRCNNs/cocoeval.py"
] | [
"__author__ = 'tsungyi'\n\nimport numpy as np\nimport datetime\nimport time\nfrom collections import defaultdict\nimport mask as maskUtils\nimport copy\n\n\nclass COCOeval:\n # Interface for evaluating detection on the Microsoft COCO dataset.\n #\n # The usage for CocoEval is as follows:\n # cocoGt=..., cocoDt=... # load dataset and results\n # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n if not cocoGt is None:\n self.params.imgIds = sorted(cocoGt.getImgIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['image_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['image_id'], dt['category_id']].append(dt)\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(imgId, catId): computeIoU(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n\n evaluateImg = self.evaluateImg\n maxDet = p.maxDets[-1]\n self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n\n def computeOks(self, imgId, catId):\n p = self.params\n # dimention here should be Nxm\n gts = self._gts[imgId, catId]\n dts = self._dts[imgId, catId]\n inds = np.argsort([-d['score'] for d in dts], kind='mergesort')\n dts = [dts[i] for i in inds]\n if len(dts) > p.maxDets[-1]:\n dts = dts[0:p.maxDets[-1]]\n # if len(gts) == 0 and len(dts) == 0:\n if len(gts) == 0 or len(dts) == 0:\n return []\n ious = np.zeros((len(dts), len(gts)))\n sigmas = p.kpt_oks_sigmas\n vars = (sigmas * 2)**2\n k = len(sigmas)\n # compute oks between each detection and ground truth object\n for j, gt in enumerate(gts):\n # create bounds for ignore regions(double the gt bbox)\n g = np.array(gt['keypoints'])\n xg = g[0::3]; yg = g[1::3]; vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n bb = gt['bbox']\n x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2\n y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2\n for i, dt in enumerate(dts):\n d = np.array(dt['keypoints'])\n xd = d[0::3]; yd = d[1::3]\n if k1>0:\n # measure the per-keypoint distance if keypoints visible\n dx = xd - xg\n dy = yd - yg\n else:\n # measure minimum distance to keypoints in (x0,y0) & (x1,y1)\n z = np.zeros((k))\n dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)\n dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)\n e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2\n if k1 > 0:\n e=e[vg > 0]\n ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]\n return ious\n\n def evaluateImg(self, imgId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId 
in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load computed ious\n ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = -np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 
in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', 
maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def _summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()\n\nclass Params:\n '''\n Params for coco evaluation api\n '''\n def setDetParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)\n self.maxDets = [1, 10, 100]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'small', 'medium', 'large']\n self.useCats = 1\n\n def setKpParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)\n self.maxDets = [20]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'medium', 'large']\n self.useCats = 1\n self.kpt_oks_sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0\n\n def __init__(self, iouType='segm'):\n if iouType == 'segm' or iouType == 'bbox':\n self.setDetParams()\n elif iouType == 'keypoints':\n self.setKpParams()\n else:\n raise Exception('iouType not supported')\n self.iouType = iouType\n # useSegm is deprecated\n self.useSegm = None"
] | [
[
"numpy.logical_not",
"numpy.spacing",
"numpy.unique",
"numpy.cumsum",
"numpy.ones",
"numpy.concatenate",
"numpy.round",
"numpy.max",
"numpy.mean",
"numpy.count_nonzero",
"numpy.searchsorted",
"numpy.exp",
"numpy.argsort",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
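Note: the core of `COCOeval.accumulate()` above is the 101-point interpolated AP: detections are sorted by score, TP/FP are cumulated, precision is made non-increasing from the right, and the curve is sampled at fixed recall thresholds with `searchsorted`. A self-contained sketch of that computation on toy matches (the inputs here are made up for illustration):

```python
import numpy as np

def interpolated_ap(scores, is_tp, n_gt, rec_thrs=np.linspace(0, 1, 101)):
    order = np.argsort(-scores, kind="mergesort")   # mergesort, as in COCOeval
    tp = np.cumsum(is_tp[order])
    fp = np.cumsum(~is_tp[order])
    recall = tp / n_gt
    precision = tp / (tp + fp + np.spacing(1))
    for i in range(len(precision) - 1, 0, -1):      # precision envelope
        precision[i - 1] = max(precision[i - 1], precision[i])
    inds = np.searchsorted(recall, rec_thrs, side="left")
    q = np.zeros_like(rec_thrs)
    valid = inds < len(precision)
    q[valid] = precision[inds[valid]]
    return q.mean()

s = np.array([.9, .8, .7, .6, .5])
tp = np.array([True, True, False, True, False])
print(round(interpolated_ap(s, tp, n_gt=4), 3))     # about 0.69
```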
yalov4uk/ML-labs | [
"ca944610614c182259783449d9ec6e9135d6aaf1"
] | [
"5/download.py"
] | [
"import os\nimport tarfile\nimport email\nimport re\nimport nltk\nimport urlextract\nimport numpy as np\nimport scipy.io as sio\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom nltk.stem import PorterStemmer\nfrom html import unescape\nfrom email import parser\nfrom email.policy import default\nfrom six.moves import urllib\nfrom collections import Counter\n\nDOWNLOAD_ROOT = \"http://spamassassin.apache.org/old/publiccorpus/\"\nHAM_URL = DOWNLOAD_ROOT + \"20030228_easy_ham.tar.bz2\"\nSPAM_URL = DOWNLOAD_ROOT + \"20030228_spam.tar.bz2\"\nSPAM_PATH = os.path.join(\"datasets\", \"spam\")\n\n\ndef fetch_spam_data(spam_url=SPAM_URL, spam_path=SPAM_PATH):\n if not os.path.isdir(spam_path):\n os.makedirs(spam_path)\n for filename, url in ((\"ham.tar.bz2\", HAM_URL), (\"spam.tar.bz2\", SPAM_URL)):\n path = os.path.join(spam_path, filename)\n if not os.path.isfile(path):\n urllib.request.urlretrieve(url, path)\n tar_bz2_file = tarfile.open(path)\n tar_bz2_file.extractall(path=SPAM_PATH)\n tar_bz2_file.close()\n\n\n# fetch_spam_data()\n\nHAM_DIR = os.path.join(SPAM_PATH, \"easy_ham\")\nSPAM_DIR = os.path.join(SPAM_PATH, \"spam\")\nham_filenames = [name for name in sorted(os.listdir(HAM_DIR)) if len(name) > 20]\nspam_filenames = [name for name in sorted(os.listdir(SPAM_DIR)) if len(name) > 20]\n\nprint(len(ham_filenames))\nprint(len(spam_filenames))\n\n\ndef load_email(is_spam, filename, spam_path=SPAM_PATH):\n directory = \"spam\" if is_spam else \"easy_ham\"\n with open(os.path.join(spam_path, directory, filename), \"rb\") as f:\n return parser.BytesParser(policy=email.policy.default).parse(f)\n\n\nham_emails = [load_email(is_spam=False, filename=name) for name in ham_filenames]\nspam_emails = [load_email(is_spam=True, filename=name) for name in spam_filenames]\n\nprint(ham_emails[4].get_content().strip())\nprint(spam_emails[5].get_content().strip())\n\n\ndef get_email_structure(email):\n if isinstance(email, str):\n return email\n payload = email.get_payload()\n if isinstance(payload, list):\n return \"multipart({})\".format(\", \".join([\n get_email_structure(sub_email)\n for sub_email in payload\n ]))\n else:\n return email.get_content_type()\n\n\ndef structures_counter(emails):\n structures = Counter()\n for email in emails:\n structure = get_email_structure(email)\n structures[structure] += 1\n return structures\n\n\nprint(structures_counter(ham_emails).most_common())\nprint('\\n')\nprint(structures_counter(spam_emails).most_common())\n\nfor header, value in spam_emails[0].items():\n print(header, \":\", value)\n\n\ndef html_to_plain_text(html):\n text = re.sub('<head.*?>.*?</head>', '', html, flags=re.M | re.S | re.I)\n text = re.sub('<a\\s.*?>', ' httpaddr ', text, flags=re.M | re.S | re.I)\n text = re.sub('<.*?>', '', text, flags=re.M | re.S)\n text = re.sub(r'(\\s*\\n)+', '\\n', text, flags=re.M | re.S)\n return unescape(text)\n\n\nhtml_spam_emails = [email for email in spam_emails\n if get_email_structure(email) == \"text/html\"]\n\nsample_html_spam = html_spam_emails[7]\nprint(\"\\nSpam email html sample:\\n\")\nprint(sample_html_spam.get_content().strip()[:1000], \"...\")\nprint(\"\\nEmail content: \\n\")\nprint(html_to_plain_text(sample_html_spam.get_content())[:1000], \"...\")\n\n\ndef email_to_text(email):\n html = None\n for part in email.walk():\n ctype = part.get_content_type()\n if not ctype in (\"text/plain\", \"text/html\"):\n continue\n try:\n content = part.get_content()\n except: # in case of encoding issues\n content = str(part.get_payload())\n if ctype == 
\"text/plain\":\n return content\n else:\n html = content\n if html:\n return html_to_plain_text(html)\n\n\nprint(email_to_text(sample_html_spam)[:100], \"...\")\n\ntry:\n stemmer = nltk.PorterStemmer()\n for word in (\"Computations\", \"Computation\", \"Computing\", \"Computed\", \"Compute\", \"Compulsive\"):\n print(word, \"=>\", stemmer.stem(word))\nexcept ImportError:\n print(\"Error: stemming requires the NLTK module.\")\n stemmer = None\n\ntry:\n url_extractor = urlextract.URLExtract()\n print(url_extractor.find_urls(\"Will it detect github.com and https://youtu.be/7Pq-S557XQU?t=3m32s\"))\nexcept ImportError:\n print(\"Error: replacing URLs requires the urlextract module.\")\n url_extractor = None\n\n\nclass EmailToWordCounterTransformer(BaseEstimator, TransformerMixin):\n def __init__(self, strip_headers=True, lower_case=True, remove_punctuation=True,\n replace_urls=True, replace_numbers=True, stemming=True):\n self.strip_headers = strip_headers\n self.lower_case = lower_case\n self.remove_punctuation = remove_punctuation\n self.replace_urls = replace_urls\n self.replace_numbers = replace_numbers\n self.stemming = stemming\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n X_transformed = []\n for email in X:\n text = email_to_text(email) or \"\"\n if self.lower_case:\n text = text.lower()\n text = re.sub(\"[$]+\", \" dollar \", text)\n text = re.sub(\"[^\\s]+@[^\\s]+\", \" emailaddr \", text)\n if self.replace_urls and url_extractor is not None:\n urls = list(set(url_extractor.find_urls(text)))\n urls.sort(key=lambda url: len(url), reverse=True)\n for url in urls:\n text = text.replace(url, \" httpaddr \")\n if self.replace_numbers:\n text = re.sub(r'\\d+(?:\\.\\d*(?:[eE]\\d+))?', 'NUMBER', text)\n if self.remove_punctuation:\n text = re.sub(r'\\W+', ' ', text, flags=re.M)\n special_chars = [\n \"<\", \"[\", \"^\", \">\", \"+\", \"?\", \"!\", \"'\", \".\", \",\", \":\",\n \"*\", \"%\", \"#\", \"_\", \"=\"\n ]\n for char in special_chars:\n text = text.replace(str(char), \"\")\n word_counts = Counter(text.split())\n if self.stemming and stemmer is not None:\n stemmed_word_counts = Counter()\n for word, count in word_counts.items():\n stemmed_word = stemmer.stem(word)\n stemmed_word_counts[stemmed_word] += count\n word_counts = stemmed_word_counts\n X_transformed.append(word_counts)\n return np.array(X_transformed)\n\n\nvocab = EmailToWordCounterTransformer().fit_transform(spam_emails)\nvocab = sum(vocab, Counter())\n\nlist = vocab.most_common(1904)\nvocab = []\nfor (k, v) in list:\n vocab.append(k)\n\nvocab = sorted(vocab)\n\n# SAVE DICTIONARY\ni = 0\nwith open('../data/vocab2.txt', 'w') as f:\n for item in vocab:\n try:\n f.write(\"%s\\t%s\\n\" % (i, item))\n i += 1\n except:\n print('error')\n\nsamples = len(ham_filenames) + len(spam_filenames)\n\nvocabList = open('../data/vocab2.txt', \"r\").read()\nvocabList = vocabList.split(\"\\n\")\nvocabList_d = {}\nfor ea in vocabList:\n if ea:\n [value, key] = ea.split(\"\\t\")\n vocabList_d[key] = value\n\nprint(vocabList_d)\nprint(email_to_text(spam_emails[0]))\n\n\ndef process_email(email_contents):\n \"\"\"\n Preprocesses the body of an email and returns a list of indices of the words contained in the email.\n \"\"\"\n # a - Lower case\n email_contents = email_contents.lower()\n\n # b - remove html/xml tags\n email_contents = re.sub(\"<[^>]*>\", \" \", email_contents).split(\" \")\n email_contents = filter(len, email_contents)\n email_contents = ' '.join(email_contents)\n\n # c - Handle URLS\n 
email_contents = re.sub(\"[http|https]://[^\\s]*\", \"httpaddr\", email_contents)\n\n # d - Handle Email Addresses\n email_contents = re.sub(\"[^\\s]+@[^\\s]+\", \"emailaddr\", email_contents)\n\n # e - Handle numbers\n email_contents = re.sub(\"[0-9]+\", \"number\", email_contents)\n\n # f - Handle $ sign\n email_contents = re.sub(\"[$]+\", \"dollar\", email_contents)\n\n # Strip all special characters\n special_chars = [\n \"<\", \"[\", \"^\", \">\", \"+\", \"?\", \"!\", \"'\", \".\", \",\", \":\",\n \"*\", \"%\", \"#\", \"_\", \"=\"\n ]\n for char in special_chars:\n email_contents = email_contents.replace(str(char), \"\")\n email_contents = email_contents.replace(\"\\n\", \" \")\n\n # Stem the word\n ps = PorterStemmer()\n email_contents = [ps.stem(token) for token in email_contents.split(\" \")]\n email_contents = \" \".join(email_contents)\n\n return email_contents\n\n\ndef find_word_indices(processed_email, vocabList_d):\n # Process the email and return word_indices\n\n word_indices = []\n\n for char in processed_email.split():\n if len(char) > 1 and char in vocabList_d:\n word_indices.append(int(vocabList_d[char]))\n\n return word_indices\n\n\ndef email_features(word_indices, vocabList_d):\n \"\"\"\n Takes in a word_indices vector and produces a feature vector from the word indices.\n \"\"\"\n n = len(vocabList_d)\n\n features = np.zeros((n, 1))\n\n for i in word_indices:\n features[i] = 1\n\n return features\n\n\ndef transform_email_to_features(email_contents, vocabList_d):\n # print(email_contents)\n processed_email = process_email(email_contents)\n word_indices = find_word_indices(processed_email, vocabList_d)\n features = email_features(word_indices, vocabList_d)\n\n return features\n\n\n# train\nX = []\nY = []\n\nprint(len(spam_emails))\nprint(len(ham_emails))\n\nfor i in range(400):\n sp = email_to_text(spam_emails[i])\n if sp:\n a = transform_email_to_features(sp, vocabList_d)\n X.append(a.flatten())\n Y.append(1)\nfor i in range(2000):\n em = email_to_text(ham_emails[i])\n if em:\n X.append(transform_email_to_features(em, vocabList_d).flatten())\n Y.append(0)\n\nsio.savemat('../data/myTrain.mat', {'X': X, 'y': Y})\n\n# test\nX = []\nY = []\n\nfor i in range(401, 500, 1):\n sp = email_to_text(spam_emails[i])\n if sp:\n a = transform_email_to_features(sp, vocabList_d)\n X.append(a.flatten())\n Y.append(1)\nfor i in range(2001, 2500, 1):\n em = email_to_text(ham_emails[i])\n if em:\n X.append(transform_email_to_features(em, vocabList_d).flatten())\n Y.append(0)\n\nsio.savemat('../data/myTest.mat', {'Xtest': X, 'ytest': Y})\n"
] | [
[
"numpy.array",
"numpy.zeros",
"scipy.io.savemat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
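Note: the `email_features` step in the entry above turns a stemmed, vocabulary-mapped email into a fixed-length binary bag-of-words vector. A tiny sketch of that encoding (the five-word vocabulary is an invented example):

```python
import numpy as np

vocab = {"dollar": 0, "httpaddr": 1, "number": 2, "click": 3, "offer": 4}

def to_features(tokens, vocab):
    # 1 at the vocabulary index of every token the email contains
    x = np.zeros(len(vocab))
    for t in tokens:
        if t in vocab:
            x[vocab[t]] = 1
    return x

print(to_features(["click", "httpaddr", "now"], vocab))   # [0. 1. 0. 1. 0.]
```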
ZhaoJY1/nussl | [
"af7d0c50e01d107f4ef3305b89eb130d95d0a7cd",
"af7d0c50e01d107f4ef3305b89eb130d95d0a7cd",
"af7d0c50e01d107f4ef3305b89eb130d95d0a7cd"
] | [
"tests/ml/test_overfit.py",
"docs/examples/benchmark/ideal_binary_mask.py",
"nussl/separation/deep/deep_clustering.py"
] | [
"from nussl import ml, datasets, evaluation\nimport tempfile\nfrom torch import optim\nimport numpy as np\nimport logging\nimport os\nimport torch\nfrom matplotlib import pyplot as plt\n\nlogging.basicConfig(\n format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',\n datefmt='%Y-%m-%d:%H:%M:%S',\n level=logging.INFO\n)\n\nfix_dir = 'tests/local/trainer'\n\n\ndef test_overfit_a(mix_source_folder):\n tfms = datasets.transforms.Compose([\n datasets.transforms.PhaseSensitiveSpectrumApproximation(),\n datasets.transforms.ToSeparationModel(),\n datasets.transforms.Cache('~/.nussl/tests/cache', overwrite=True),\n datasets.transforms.GetExcerpt(400)\n ])\n dataset = datasets.MixSourceFolder(\n mix_source_folder, transform=tfms)\n\n ml.train.cache_dataset(dataset)\n dataset.cache_populated = True\n\n dataloader = torch.utils.data.DataLoader(\n dataset, shuffle=True, batch_size=len(dataset), num_workers=2)\n\n # create the model, based on the first item in the dataset\n # second bit of the shape is the number of features\n n_features = dataset[0]['mix_magnitude'].shape[1]\n mi_config = ml.networks.builders.build_recurrent_mask_inference(\n n_features, 50, 1, False, 0.0, 2, 'sigmoid',\n )\n\n model = ml.SeparationModel(mi_config)\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n if device == 'cuda':\n epoch_length = 100\n else:\n epoch_length = 10\n model = model.to(device)\n # create optimizer\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\n\n loss_dictionary = {\n 'L1Loss': {\n 'weight': 1.0\n }\n }\n\n train_closure = ml.train.closures.TrainClosure(\n loss_dictionary, optimizer, model)\n val_closure = ml.train.closures.ValidationClosure(\n loss_dictionary, model)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n _dir = fix_dir if fix_dir else tmpdir\n os.makedirs(os.path.join(_dir, 'plots'), exist_ok=True)\n\n trainer, validator = ml.train.create_train_and_validation_engines(\n train_closure, val_closure, device=device\n )\n\n # add handlers to engine\n ml.train.add_stdout_handler(trainer, validator)\n ml.train.add_validate_and_checkpoint(\n _dir, model, optimizer, dataset,\n trainer, val_data=dataloader, validator=validator)\n ml.train.add_tensorboard_handler(_dir, trainer)\n\n # run engine\n trainer.run(dataloader, max_epochs=5, epoch_length=epoch_length)\n\n model_path = os.path.join(\n trainer.state.output_folder, 'checkpoints', 'best.model.pth')\n state_dict = torch.load(\n model_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(state_dict['state_dict'])\n\n history = state_dict['metadata']['trainer.state.epoch_history']\n\n for key in history:\n plt.figure(figsize=(10, 4))\n plt.title(f\"epoch:{key}\")\n plt.plot(np.array(history[key]).reshape(-1, ))\n plt.savefig(os.path.join(\n trainer.state.output_folder, 'plots',\n f\"epoch:{key.replace('/', ':')}.png\"))\n\n",
"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.5.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # Ideal Binary Mask\n#\n\n# +\nimport nussl\nimport matplotlib.pyplot as plt\nimport time\n\nstart_time = time.time()\n\ndef visualize_and_embed(sources):\n plt.figure(figsize=(10, 6))\n plt.subplot(211)\n nussl.utils.visualize_sources_as_masks(sources,\n y_axis='mel', db_cutoff=-40, alpha_amount=2.0)\n plt.subplot(212)\n nussl.utils.visualize_sources_as_waveform(\n sources, show_legend=False)\n plt.show()\n nussl.play_utils.multitrack(sources)\n\nmusdb = nussl.datasets.MUSDB18(\n download=True, sample_rate=16000,\n strict_sample_rate = False\n)\ni = 39\nitem = musdb[i]\nmix = item['mix']\nsource_names = sorted(list(item['sources'].keys()))\nsources = [item['sources'][k] for k in source_names]\n# -\n\nseparator = nussl.separation.benchmark.IdealBinaryMask(\n mix, sources)\nestimates = separator()\nestimates = {\n source_names[i]: e for i, e in enumerate(estimates)\n}\nvisualize_and_embed(estimates)\n\nend_time = time.time()\ntime_taken = end_time - start_time\nprint(f'Time taken: {time_taken:.4f} seconds')\n",
"import torch\n\nfrom ..base import ClusteringSeparationBase, DeepMixin, SeparationException\n\n\nclass DeepClustering(DeepMixin, ClusteringSeparationBase):\n \"\"\"\n Clusters the embedding produced by a deep model for every time-frequency point.\n This is the deep clustering source separation approach. It is flexible with\n the number of sources. It expects that the model outputs a dictionary where one\n of the keys is 'embedding'. This uses the `DeepMixin` class to load the model\n and set the audio signal's parameters to be appropriate for the model.\n \n Args:\n input_audio_signal: (AudioSignal`) An AudioSignal object containing the \n mixture to be separated.\n num_sources (int): Number of sources to cluster the features of and separate\n the mixture.\n model_path (str, optional): Path to the model that will be used. Can be None, \n so that you can initialize a class and load the model later. \n Defaults to None.\n device (str, optional): Device to put the model on. Defaults to 'cpu'.\n extra_data (dict, optional): Any extra data that is to be passed at runtime\n to the SeparationModel.\n **kwargs (dict): Keyword arguments for ClusteringSeparationBase and the \n clustering object used for clustering (one of KMeans, GaussianMixture,\n MiniBatchKmeans).\n \n Raises:\n SeparationException: If 'embedding' isn't in the output of the model.\n \"\"\"\n def __init__(self, input_audio_signal, num_sources, model_path=None,\n device='cpu', extra_data=None, **kwargs):\n super().__init__(input_audio_signal, num_sources, **kwargs)\n if model_path is not None:\n self.load_model(model_path, device=device)\n # audio channel dimension in a dpcl model\n self.channel_dim = -1\n self.extra_data = extra_data\n\n def forward(self):\n return self.extract_features()\n\n def extract_features(self):\n input_data = self._get_input_data_for_model(self.extra_data)\n with torch.no_grad():\n output = self.model(input_data)\n if 'embedding' not in output:\n raise SeparationException(\n \"This model is not a deep clustering model! \"\n \"Did not find 'embedding' key in output dictionary.\")\n embedding = output['embedding']\n # swap back batch and sample dims\n if self.metadata['num_channels'] == 1:\n embedding = embedding.transpose(0, -2)\n embedding = embedding.squeeze(0).transpose(0, 1)\n self._preprocess_audio_signal()\n return embedding.cpu().data.numpy()\n"
] | [
[
"matplotlib.pyplot.title",
"torch.load",
"torch.cuda.is_available",
"numpy.array",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
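The IdealBinaryMask benchmark used in the second file of the row above assigns each time-frequency bin entirely to whichever source has the largest magnitude there. A minimal numpy sketch of that idea on made-up magnitude spectrograms (nussl itself works on AudioSignal objects and STFTs; the small random arrays here are stand-ins):

import numpy as np

rng = np.random.default_rng(0)
sources = np.stack([rng.random((4, 5)) for _ in range(2)])  # (n_sources, freq, time)
mix = sources.sum(axis=0)

# one-hot mask: True where a source is the loudest in that bin
masks = sources == sources.max(axis=0)
estimates = masks * mix  # each estimate keeps only the bins it "won"

assert np.allclose(estimates.sum(axis=0), mix)  # the masks partition the mixture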
Wason1/Rows-to-columns-consolidator | [
"21ee991ba907ba61708d24acfc6d5b3a3e754677"
] | [
"script.py"
] | [
"#Import Libs\nimport pandas as pd\n\n# Inputs\n#col_name = input('What is the name of the column to convert to columns?: ')\n#keyz = input('what are the names of the columns that uniquely identify a row? (seperate these with pipes \"|\"):')\n#keyz.split('|')\nfile_dir = r'data.xlsx'\noutput_dir = r'data-out.xlsx'\nlist_key_cols = ['Person - Medical Record Number', 'CE-Verified DT/TM']\n# This is the column that you want to return the unique items and make those the unique iterms the headings for new columns\nsplit_col = 'Clinical Event'\n# Use this data to fill in the new columns\nfiller_col = 'Clinical Event Result'\n\n# Create Dataframe\ndf = pd.read_excel(\n file_dir\n )\n\n#convert to strings\ndf = df.applymap(str)\n# Make the primary key\nseries_primary_key = df[list_key_cols].sum(1)\ndf['primary-key']=series_primary_key\n# new base dataframe\ndfa = df.drop(\n [split_col, filler_col],\n axis='columns'\n )\n# drop duplicates\ndfa.drop_duplicates(\n keep='first',\n inplace=True,\n)\n\ndfa.set_index(keys='primary-key', inplace=True)\n\n# new columns\nnew_cols = pd.unique(df[split_col])\nfor item in new_cols:\n dfa[item]=''\n\n# Iterate and fill in dfa\nfor a_key in series_primary_key:\n for col_name in new_cols:\n df_temp = df.loc[df['primary-key'] == a_key]\n df_temp = df_temp.loc[df_temp[split_col] == col_name]\n df_temp.reset_index(inplace=True)\n try:\n cell_text = df_temp.at[0, filler_col]\n dfa.at[a_key, col_name] = cell_text\n except:\n print('1')\n\n# Export dataframe\ndfa.to_excel(output_dir, index = False)"
] | [
[
"pandas.read_excel",
"pandas.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
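The double loop in script.py above is doing by hand what a pandas pivot does directly. A sketch of the same rows-to-columns reshape with pivot_table, on a small made-up frame that reuses the script's column names:

import pandas as pd

df = pd.DataFrame({
    "Person - Medical Record Number": ["A", "A", "B"],
    "CE-Verified DT/TM": ["t1", "t1", "t2"],
    "Clinical Event": ["HR", "BP", "HR"],
    "Clinical Event Result": ["80", "120/80", "75"],
})

wide = df.pivot_table(
    index=["Person - Medical Record Number", "CE-Verified DT/TM"],
    columns="Clinical Event",
    values="Clinical Event Result",
    aggfunc="first",  # keep the first result per key, as the loop's .at[0, ...] does
)
print(wide.reset_index())  # one row per key, one column per unique Clinical Event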
ltqusst/lpc_vocoder | [
"baf29d40dcf9f4b80a73146dca939c7841045441"
] | [
"sws.py"
] | [
"## MIT License\n\n# Copyright (c) 2017 John Williamson\n# Copyright (c) 2008 Cournapeau David\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport numpy as np\nimport scipy, scipy.io, scipy.io.wavfile, scipy.signal\nimport os\nfrom pathlib import Path\nimport argparse\n\n# This function is copied directly from https://github.com/cournape/talkbox/blob/master/scikits/talkbox/linpred/py_lpc.py\n# Copyright (c) 2008 Cournapeau David\n# (MIT licensed)\ndef levinson_1d(r, order):\n \"\"\"Levinson-Durbin recursion, to efficiently solve symmetric linear systems\n with toeplitz structure.\n\n Parameters\n ---------\n r : array-like\n input array to invert (since the matrix is symmetric Toeplitz, the\n corresponding pxp matrix is defined by p items only). Generally the\n autocorrelation of the signal for linear prediction coefficients\n estimation. The first item must be a non zero real.\n\n Notes\n ----\n This implementation is in python, hence unsuitable for any serious\n computation. Use it as educational and reference purpose only.\n\n Levinson is a well-known algorithm to solve the Hermitian toeplitz\n equation:\n\n _ _\n -R[1] = R[0] R[1] ... R[p-1] a[1]\n : : : : * :\n : : : _ * :\n -R[p] = R[p-1] R[p-2] ... R[0] a[p]\n _\n with respect to a ( is the complex conjugate). 
Using the special symmetry\n in the matrix, the inversion can be done in O(p^2) instead of O(p^3).\n \"\"\"\n r = np.atleast_1d(r)\n if r.ndim > 1:\n raise ValueError(\"Only rank 1 are supported for now.\")\n\n n = r.size\n if n < 1:\n raise ValueError(\"Cannot operate on empty array !\")\n elif order > n - 1:\n raise ValueError(\"Order should be <= size-1\")\n\n if not np.isreal(r[0]):\n raise ValueError(\"First item of input must be real.\")\n elif not np.isfinite(1 / r[0]):\n raise ValueError(\"First item should be != 0\")\n\n # Estimated coefficients\n a = np.empty(order + 1, r.dtype)\n # temporary array\n t = np.empty(order + 1, r.dtype)\n # Reflection coefficients\n k = np.empty(order, r.dtype)\n\n a[0] = 1.0\n e = r[0]\n\n for i in range(1, order + 1):\n acc = r[i]\n for j in range(1, i):\n acc += a[j] * r[i - j]\n k[i - 1] = -acc / e\n a[i] = k[i - 1]\n\n for j in range(order):\n t[j] = a[j]\n\n for j in range(1, i):\n a[j] += k[i - 1] * np.conj(t[i - j])\n\n e *= 1 - k[i - 1] * np.conj(k[i - 1])\n\n return a, e, k\n\n\nfrom numpy.polynomial import polynomial as P\n\n\ndef lsp_to_lpc(lsp):\n \"\"\"Convert line spectral pairs to LPC\"\"\"\n ps = np.concatenate((lsp[:, 0], -lsp[::-1, 0], [np.pi]))\n qs = np.concatenate((lsp[:, 1], [0], -lsp[::-1, 1]))\n\n p = np.cos(ps) - np.sin(ps) * 1.0j\n q = np.cos(qs) - np.sin(qs) * 1.0j\n\n p = np.real(P.polyfromroots(p))\n q = -np.real(P.polyfromroots(q))\n\n a = 0.5 * (p + q)\n return a[:-1]\n\n\ndef lpc_noise_synthesize(lpc, samples=10000):\n \"\"\"Apply LPC coefficients to white noise\"\"\"\n phase = np.random.uniform(0, 0.5, (samples))\n signal = scipy.signal.lfilter([1.0], lpc, phase)\n return signal\n\n\ndef lpc_buzz_synthesize(lpc, f, sr, samples=10000):\n \"\"\"Apply LPC coefficients to a sawtooth with the given frequency and sample rate\"\"\"\n phase = scipy.signal.sawtooth(2 * np.pi * f * np.arange(samples) / (sr))\n signal = scipy.signal.lfilter([1.0], lpc, phase)\n return signal\n\n\ndef lpc_to_lsp(lpc):\n \"\"\"Convert LPC to line spectral pairs\"\"\"\n l = len(lpc) + 1\n a = np.zeros((l,))\n a[0:-1] = lpc\n p = np.zeros((l,))\n q = np.zeros((l,))\n for i in range(l):\n j = l - i - 1\n p[i] = a[i] + a[j]\n q[i] = a[i] - a[j]\n\n ps = np.sort(np.angle(np.roots(p)))\n qs = np.sort(np.angle(np.roots(q)))\n lsp = np.vstack([ps[: len(ps) // 2], qs[: len(qs) // 2]]).T\n return lsp\n\n\ndef lpc_to_formants(lpc, sr):\n \"\"\"Convert LPC to formants \n \"\"\"\n\n # extract roots, get angle and radius\n roots = np.roots(lpc)\n\n pos_roots = roots[np.imag(roots) >= 0]\n if len(pos_roots) < len(roots) // 2:\n pos_roots = list(pos_roots) + [0] * (len(roots) // 2 - len(pos_roots))\n if len(pos_roots) > len(roots) // 2:\n pos_roots = pos_roots[: len(roots) // 2]\n\n w = np.angle(pos_roots)\n a = np.abs(pos_roots)\n\n order = np.argsort(w)\n w = w[order]\n a = a[order]\n\n freqs = w * (sr / (2 * np.pi))\n bws = -0.5 * (sr / (2 * np.pi)) * np.log(a)\n\n # exclude DC and sr/2 frequencies\n return freqs, bws\n\n\ndef load_wave(fname):\n \"\"\"Load a 16 bit wave file and return normalised in 0,1 range.\n Convert stereo WAV to mono by simple averaging. \"\"\"\n # load and return a wave file\n sr, wave = scipy.io.wavfile.read(fname)\n # convert to mono\n if len(wave.shape) > 1:\n wave = np.mean(wave, axis=1)\n return wave / 32768.0, sr\n\n\ndef lpc(wave, order):\n \"\"\"Compute LPC of the waveform. 
\n a: the LPC coefficients\n e: the total error\n k: the reflection coefficients\n \n Typically only a is required.\n \"\"\"\n # only use right half of autocorrelation, normalised by total length\n autocorr = scipy.signal.correlate(wave, wave)[len(wave) - 1 :] / len(wave)\n a, e, k = levinson_1d(autocorr, order)\n return a, e, k\n\n\ndef modfm_buzz(samples, f, sr, k):\n \"\"\"Generate a pulse train using modfm:\n y(t) = cos(x(t)) * exp(cos(x(t))*k - k)\n \n samples: number of samples to generate\n f: base frequency (Hz)\n sr: sample rate (Hz)\n k: modulation depth; higher has more harmonics but increases risk of aliasing\n (e.g. k=1000 for f=50, k=100 for f=200, k=2 for f=4000) \n \n \"\"\"\n t = np.arange(samples)\n phase = f * 2 * np.pi * (t / float(sr))\n # simple pulse oscillator (ModFM)\n buzz = np.cos(phase) * np.exp(np.cos(phase) * k - k)\n return buzz\n\n\ndef noise(samples):\n \"\"\"Generate white noise in range [-1,1]\n \n samples: number of samples to generate\n \"\"\"\n return np.random.uniform(-1, 1, size=samples)\n\n\ndef lpc_vocode(\n wave,\n frame_len,\n order,\n carrier,\n residual_amp=0.0,\n vocode_amp=1.0,\n env=False,\n freq_shift=1.0,\n):\n \"\"\"\n Apply LPC vocoding to a pair of signals using 50% overlap-add Hamming window resynthesis\n The modulator `wave` is applied to the carrier `imposed`\n \n Parameters:\n ---\n wave: modulator wave\n frame_len: length of frames\n order: LPC order (typically 2-30)\n carrier: carrier signal; should be at least as long as wave\n residual_amp: amplitude of LPC residual to include in output\n vocode_amp: amplitude of vocoded signal \n env: if True, the original volume envelope of wave is imposed on the output\n otherwise, no volume modulation is applied\n freq_shift: (default 1.0) shift the frequency of the resonances by the given scale factor. 
Warning :\n values >1.1 are usually unstable, and values <0.5 likewise.\n \"\"\"\n\n # precompute the hamming window\n window = scipy.signal.hann(frame_len)\n t = np.arange(frame_len)\n # allocate the array for the output\n vocode = np.zeros(len(wave + frame_len))\n last = np.zeros(order)\n # 50% window steps for overlap-add\n for i in range(0, len(wave), frame_len // 2):\n # slice the wave\n wave_slice = wave[i : i + frame_len]\n carrier_slice = carrier[i : i + frame_len]\n if len(wave_slice) == frame_len:\n # compute LPC\n a, error, reflection = lpc(wave_slice, order)\n\n # apply shifting in LSP space\n lsp = lpc_to_lsp(a)\n lsp = (lsp * freq_shift + np.pi) % (np.pi) - np.pi\n a = lsp_to_lpc(lsp)\n\n # compute the LPC residual\n residual = scipy.signal.lfilter(a, 1.0, wave_slice)\n # filter, using LPC as the *IIR* component\n # vocoded, last = scipy.signal.lfilter([1.], a, carrier_slice, zi=last)\n vocoded = scipy.signal.lfilter([1.0], a, carrier_slice)\n\n # match RMS of original signal\n if env:\n voc_amp = 1e-5 + np.sqrt(np.mean(vocoded ** 2))\n wave_amp = 1e-5 + np.sqrt(np.mean(wave_slice ** 2))\n vocoded = vocoded * (wave_amp / voc_amp)\n\n # Hann window 50%-overlap-add to remove clicking\n vocode[i : i + frame_len] += (\n vocoded * vocode_amp + residual * residual_amp\n ) * window\n\n return vocode[: len(wave)]\n\n\ndef get_formants(wave, frame_len, order, sr=44100, use_lsp=False):\n \"\"\"Plot the formants of the given wave form.\n \n Parameters:\n wave: Signal to analyse, as a 1D matrix\n frame_len: Length of analysis window, in samples\n order: Order of the LPC analysis performed\n sr: Sample rate, in Hz\n use_lsp: If True, use the LSP formant estimation instead of direct LPC\n \n Plots both the formant trace and the relative RMS power of the residual signal.\n \"\"\"\n formants = []\n formant_bw = []\n times = []\n res_rms = []\n env = []\n for i in range(0, len(wave), frame_len // 2):\n # slice the wave\n wave_slice = wave[i : i + frame_len]\n if len(wave_slice) == frame_len:\n # compute LPC\n a, error, reflection = lpc(wave_slice, order)\n\n # either use LSP (freq from mean angle, bw from spacing)\n if use_lsp:\n lsp = lpc_to_lsp(a)\n\n formants.append(-np.mean(lsp, axis=1) * (sr / (2 * np.pi)))\n formant_bw.append(0.5 * np.diff(lsp, axis=1)[:, 0] * (sr / (2 * np.pi)))\n\n else:\n # or use roots of LPC directly\n freq, bw = lpc_to_formants(a, sr)\n formants.append(freq)\n formant_bw.append(bw)\n\n times.append(i / float(sr))\n\n # compute the LPC residual\n residual = scipy.signal.lfilter(a, 1.0, wave_slice)\n rms = np.sqrt(np.mean(wave_slice ** 2))\n residual_rms = np.sqrt(np.mean(residual ** 2))\n res_rms.append(residual_rms)\n env.append(rms)\n\n return (\n np.array(times),\n np.array(formants),\n np.array(formant_bw),\n np.array(res_rms),\n np.array(env),\n )\n\n\ndef sinethesise(wave, frame_len, order, sr=44100, use_lsp=False, noise=1.0):\n times, formants, formant_bw, res_rms, env_rms = get_formants(\n wave, frame_len, order, sr, use_lsp\n )\n synthesize = np.zeros_like(wave)\n window = scipy.signal.hann(frame_len)\n t = np.arange(frame_len)\n k = 0\n for i in range(0, len(wave), frame_len // 2):\n\n if len(synthesize[i : i + frame_len]) == frame_len:\n # noise component\n syn_slice = (\n np.random.normal(0, 1, frame_len) * (res_rms[k] / env_rms[k]) * noise\n )\n\n # resonances\n for band in range(formants.shape[1]):\n freq = formants[k, band]\n bw = formant_bw[k, band]\n amp = 50.0 / (bw) # weight sines by inverse bandwidth\n syn_slice += np.sin(freq * (t + i) / (sr / 
(2 * np.pi))) * amp\n\n synthesize[i : i + frame_len] += window * syn_slice * env_rms[k]\n k += 1\n return synthesize\n\n\ndef bp_filter_and_decimate(x, low, high, fs, decimate=1):\n b, a = scipy.signal.butter(4, Wn=[low, high], btype=\"band\", fs=fs)\n decimated = scipy.signal.filtfilt(b, a, x)[::decimate]\n return decimated\n\ndef normalize(x):\n return x / np.max(x) \n\n\ndef upsample(x, factor):\n return scipy.signal.resample_poly(x, factor, 1)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"input_wav\", help=\"The input file, as a WAV file; ideally 44.1KHz mono.\"\n )\n parser.add_argument(\n \"output_wav\",\n nargs=\"?\",\n help=\"The output file to write to; defaults to <input>_sws.wav\",\n default=None,\n )\n parser.add_argument(\"--lp\", help=\"Lowpass filter cutoff\", type=float, default=100)\n parser.add_argument(\"--hp\", help=\"Highpass filter cutoff\", type=float, default=3000)\n parser.add_argument(\n \"--order\", \"-o\", help=\"LPC order; number of components in synthesis\", default=5, type=int\n )\n parser.add_argument(\n \"--use_lsp\",\n \"-l\",\n help=\"LPC order; number of components in synthesis\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--decimate\", \"-d\", help=\"Sample rate decimation before analysis\", default=4, type=int\n )\n parser.add_argument(\n \"--window\",\n \"-w\",\n type=int,\n help=\"LPC window size; smaller means faster changing signal; larger is smoother\",\n default=300,\n )\n parser.add_argument(\n \"--sine\",\n \"-s\",\n help=\"Resynthesise using sinewave speech (default)\",\n action=\"store_true\",\n default=True,\n )\n parser.add_argument(\n \"--buzz\",\n \"-b\",\n help=\"Resynthesie using buzz at given frequency (Hz)\",\n default=None,\n )\n parser.add_argument(\n \"--noise\", \"-n\", help=\"Resynthesize using filtered white noise\", action=\"store_true\"\n )\n\n args = parser.parse_args()\n\n args.output_wav = (\n args.output_wav or os.path.splitext(args.input_wav)[0] + \"_sws.wav\"\n )\n\n input_path = Path(args.input_wav)\n output_path = Path(args.output_wav)\n\n if not input_path.exists():\n print(f\"Cannot open {args.input_wav} for reading.\")\n exit(-1)\n\n \n wav, fs = load_wave(input_path)\n print(f\"Read {input_path}\")\n\n wav_filtered = normalize(bp_filter_and_decimate(\n wav, args.lp, args.hp, fs, decimate=args.decimate\n ))\n if args.sine:\n modulated = sinethesise(\n wav_filtered,\n frame_len=args.window,\n order=args.order,\n use_lsp=args.use_lsp,\n sr=fs / args.decimate,\n noise=0.0,\n )\n if args.buzz or args.noise:\n\n if args.buzz:\n N = 12 * np.log2(float(args.buzz)/440.0) + 69\n \n k = np.exp(-0.1513*N) + 15.927 # ModFM k values from: http://mural.maynoothuniversity.ie/4104/1/VL_New_perspectives.pdf\n \n carrier = modfm_buzz(len(wav_filtered), f=np.full(len(wav_filtered), args.buzz, dtype=np.float64),\n sr=float(fs/args.decimate), k=np.full(len(wav_filtered), k*k))\n if args.noise:\n carrier = np.random.normal(0,1,len(wav_filtered))\n\n modulated = lpc_vocode(wav_filtered, frame_len=args.window, order=args.order,\n carrier=carrier, residual_amp=0, vocode_amp=1, env=True, freq_shift=1)\n\n # un-decimate, normalize and write out\n up_modulated = normalize(upsample(modulated, args.decimate))\n \n scipy.io.wavfile.write(output_path, fs, up_modulated)\n print(f\"Wrote {output_path}\")\n"
] | [
[
"numpy.imag",
"scipy.signal.correlate",
"numpy.concatenate",
"numpy.max",
"numpy.zeros_like",
"numpy.mean",
"numpy.polynomial.polynomial.polyfromroots",
"numpy.exp",
"scipy.io.wavfile.read",
"numpy.arange",
"numpy.sin",
"numpy.atleast_1d",
"numpy.roots",
"scipy.signal.butter",
"numpy.diff",
"scipy.signal.lfilter",
"numpy.zeros",
"numpy.log",
"scipy.signal.resample_poly",
"numpy.argsort",
"numpy.array",
"numpy.isreal",
"scipy.io.wavfile.write",
"numpy.abs",
"scipy.signal.filtfilt",
"numpy.isfinite",
"numpy.conj",
"numpy.cos",
"numpy.random.uniform",
"numpy.random.normal",
"scipy.signal.hann",
"numpy.angle",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.18",
"1.0",
"0.19"
],
"tensorflow": []
}
] |
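The heart of sws.py above is the LPC step: build the Yule-Walker system from the signal's autocorrelation, solve the Toeplitz system, then treat the coefficients as an all-pole filter. The script's own solver is levinson_1d; the same system can be cross-checked with scipy's generic Toeplitz solver. A sketch on a synthetic tone (the signal parameters below are made up):

import numpy as np
import scipy.signal
from scipy.linalg import solve_toeplitz

order, sr = 8, 8000
t = np.arange(2048) / sr
wave = np.sin(2 * np.pi * 440 * t) + 0.01 * np.random.randn(t.size)

# right half of the autocorrelation, normalised, as in lpc() above
r = scipy.signal.correlate(wave, wave)[wave.size - 1:] / wave.size

# Yule-Walker equations: R a = -r, where R is the symmetric Toeplitz
# matrix built from r[0..order-1]
a = np.concatenate(([1.0], solve_toeplitz(r[:order], -r[1:order + 1])))

# excite the all-pole filter with noise, as lpc_noise_synthesize() does
resynth = scipy.signal.lfilter([1.0], a, np.random.uniform(0, 0.5, wave.size))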
mokshagna517/recommendation_sys | [
"bc8ced225dff3c93d619ff5da363f42d0aa0676c",
"bc8ced225dff3c93d619ff5da363f42d0aa0676c",
"bc8ced225dff3c93d619ff5da363f42d0aa0676c"
] | [
"venv/Lib/site-packages/pandas/tests/extension/test_categorical.py",
"venv/Lib/site-packages/sklearn/linear_model/tests/test_sparse_coordinate_descent.py",
"venv/Lib/site-packages/scipy/sparse/linalg/tests/test_matfuncs.py"
] | [
"\"\"\"\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\n\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\n\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\n\"\"\"\nimport string\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Categorical\nfrom pandas.api.types import CategoricalDtype\nfrom pandas.tests.extension import base\nimport pandas.util.testing as tm\n\n\ndef make_data():\n while True:\n values = np.random.choice(list(string.ascii_letters), size=100)\n # ensure we meet the requirements\n # 1. first two not null\n # 2. first and second are different\n if values[0] != values[1]:\n break\n return values\n\n\[email protected]\ndef dtype():\n return CategoricalDtype()\n\n\[email protected]\ndef data():\n \"\"\"Length-100 array for this type.\n\n * data[0] and data[1] should both be non missing\n * data[0] and data[1] should not gbe equal\n \"\"\"\n return Categorical(make_data())\n\n\[email protected]\ndef data_missing():\n \"\"\"Length 2 array with [NA, Valid]\"\"\"\n return Categorical([np.nan, \"A\"])\n\n\[email protected]\ndef data_for_sorting():\n return Categorical([\"A\", \"B\", \"C\"], categories=[\"C\", \"A\", \"B\"], ordered=True)\n\n\[email protected]\ndef data_missing_for_sorting():\n return Categorical([\"A\", None, \"B\"], categories=[\"B\", \"A\"], ordered=True)\n\n\[email protected]\ndef na_value():\n return np.nan\n\n\[email protected]\ndef data_for_grouping():\n return Categorical([\"a\", \"a\", None, None, \"b\", \"b\", \"a\", \"c\"])\n\n\nclass TestDtype(base.BaseDtypeTests):\n pass\n\n\nclass TestInterface(base.BaseInterfaceTests):\n @pytest.mark.skip(reason=\"Memory usage doesn't match\")\n def test_memory_usage(self, data):\n # Is this deliberate?\n super().test_memory_usage(data)\n\n\nclass TestConstructors(base.BaseConstructorsTests):\n pass\n\n\nclass TestReshaping(base.BaseReshapingTests):\n def test_ravel(self, data):\n # GH#27199 Categorical.ravel returns self until after deprecation cycle\n with tm.assert_produces_warning(FutureWarning):\n data.ravel()\n\n\nclass TestGetitem(base.BaseGetitemTests):\n skip_take = pytest.mark.skip(reason=\"GH-20664.\")\n\n @pytest.mark.skip(reason=\"Backwards compatibility\")\n def test_getitem_scalar(self, data):\n # CategoricalDtype.type isn't \"correct\" since it should\n # be a parent of the elements (object). 
But don't want\n # to break things by changing.\n super().test_getitem_scalar(data)\n\n @skip_take\n def test_take(self, data, na_value, na_cmp):\n # TODO remove this once Categorical.take is fixed\n super().test_take(data, na_value, na_cmp)\n\n @skip_take\n def test_take_negative(self, data):\n super().test_take_negative(data)\n\n @skip_take\n def test_take_pandas_style_negative_raises(self, data, na_value):\n super().test_take_pandas_style_negative_raises(data, na_value)\n\n @skip_take\n def test_take_non_na_fill_value(self, data_missing):\n super().test_take_non_na_fill_value(data_missing)\n\n @skip_take\n def test_take_out_of_bounds_raises(self, data, allow_fill):\n return super().test_take_out_of_bounds_raises(data, allow_fill)\n\n @pytest.mark.skip(reason=\"GH-20747. Unobserved categories.\")\n def test_take_series(self, data):\n super().test_take_series(data)\n\n @skip_take\n def test_reindex_non_na_fill_value(self, data_missing):\n super().test_reindex_non_na_fill_value(data_missing)\n\n @pytest.mark.skip(reason=\"Categorical.take buggy\")\n def test_take_empty(self, data, na_value, na_cmp):\n super().test_take_empty(data, na_value, na_cmp)\n\n @pytest.mark.skip(reason=\"test not written correctly for categorical\")\n def test_reindex(self, data, na_value):\n super().test_reindex(data, na_value)\n\n\nclass TestSetitem(base.BaseSetitemTests):\n pass\n\n\nclass TestMissing(base.BaseMissingTests):\n @pytest.mark.skip(reason=\"Not implemented\")\n def test_fillna_limit_pad(self, data_missing):\n super().test_fillna_limit_pad(data_missing)\n\n @pytest.mark.skip(reason=\"Not implemented\")\n def test_fillna_limit_backfill(self, data_missing):\n super().test_fillna_limit_backfill(data_missing)\n\n\nclass TestReduce(base.BaseNoReduceTests):\n pass\n\n\nclass TestMethods(base.BaseMethodsTests):\n @pytest.mark.skip(reason=\"Unobserved categories included\")\n def test_value_counts(self, all_data, dropna):\n return super().test_value_counts(all_data, dropna)\n\n def test_combine_add(self, data_repeated):\n # GH 20825\n # When adding categoricals in combine, result is a string\n orig_data1, orig_data2 = data_repeated(2)\n s1 = pd.Series(orig_data1)\n s2 = pd.Series(orig_data2)\n result = s1.combine(s2, lambda x1, x2: x1 + x2)\n expected = pd.Series(\n ([a + b for (a, b) in zip(list(orig_data1), list(orig_data2))])\n )\n self.assert_series_equal(result, expected)\n\n val = s1.iloc[0]\n result = s1.combine(val, lambda x1, x2: x1 + x2)\n expected = pd.Series([a + val for a in list(orig_data1)])\n self.assert_series_equal(result, expected)\n\n @pytest.mark.skip(reason=\"Not Applicable\")\n def test_fillna_length_mismatch(self, data_missing):\n super().test_fillna_length_mismatch(data_missing)\n\n def test_searchsorted(self, data_for_sorting):\n if not data_for_sorting.ordered:\n raise pytest.skip(reason=\"searchsorted requires ordered data.\")\n\n\nclass TestCasting(base.BaseCastingTests):\n pass\n\n\nclass TestArithmeticOps(base.BaseArithmeticOpsTests):\n def test_arith_series_with_scalar(self, data, all_arithmetic_operators):\n\n op_name = all_arithmetic_operators\n if op_name != \"__rmod__\":\n super().test_arith_series_with_scalar(data, op_name)\n else:\n pytest.skip(\"rmod never called when string is first argument\")\n\n def test_add_series_with_extension_array(self, data):\n ser = pd.Series(data)\n with pytest.raises(TypeError, match=\"cannot perform\"):\n ser + data\n\n def test_divmod_series_array(self):\n # GH 23287\n # skipping because it is not implemented\n pass\n\n def 
_check_divmod_op(self, s, op, other, exc=NotImplementedError):\n return super()._check_divmod_op(s, op, other, exc=TypeError)\n\n\nclass TestComparisonOps(base.BaseComparisonOpsTests):\n def _compare_other(self, s, data, op_name, other):\n op = self.get_op_from_name(op_name)\n if op_name == \"__eq__\":\n result = op(s, other)\n expected = s.combine(other, lambda x, y: x == y)\n assert (result == expected).all()\n\n elif op_name == \"__ne__\":\n result = op(s, other)\n expected = s.combine(other, lambda x, y: x != y)\n assert (result == expected).all()\n\n else:\n with pytest.raises(TypeError):\n op(data, other)\n\n\nclass TestParsing(base.BaseParsingTests):\n pass\n",
"import numpy as np\nimport scipy.sparse as sp\n\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_less\n\nfrom sklearn.utils.testing import assert_greater\nfrom sklearn.utils.testing import ignore_warnings\nfrom sklearn.utils.testing import assert_warns\nfrom sklearn.exceptions import ConvergenceWarning\n\nfrom sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,\n LassoCV, ElasticNetCV)\n\n\ndef test_sparse_coef():\n # Check that the sparse_coef property works\n clf = ElasticNet()\n clf.coef_ = [1, 2, 3]\n\n assert sp.isspmatrix(clf.sparse_coef_)\n assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)\n\n\ndef test_normalize_option():\n # Check that the normalize option in enet works\n X = sp.csc_matrix([[-1], [0], [1]])\n y = [-1, 0, 1]\n clf_dense = ElasticNet(fit_intercept=True, normalize=True)\n clf_sparse = ElasticNet(fit_intercept=True, normalize=True)\n clf_dense.fit(X, y)\n X = sp.csc_matrix(X)\n clf_sparse.fit(X, y)\n assert_almost_equal(clf_dense.dual_gap_, 0)\n assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)\n\n\ndef test_lasso_zero():\n # Check that the sparse lasso can handle zero data without crashing\n X = sp.csc_matrix((3, 1))\n y = [0, 0, 0]\n T = np.array([[1], [2], [3]])\n clf = Lasso().fit(X, y)\n pred = clf.predict(T)\n assert_array_almost_equal(clf.coef_, [0])\n assert_array_almost_equal(pred, [0, 0, 0])\n assert_almost_equal(clf.dual_gap_, 0)\n\n\ndef test_enet_toy_list_input():\n # Test ElasticNet for various values of alpha and l1_ratio with list X\n\n X = np.array([[-1], [0], [1]])\n X = sp.csc_matrix(X)\n Y = [-1, 0, 1] # just a straight line\n T = np.array([[2], [3], [4]]) # test sample\n\n # this should be the same as unregularized least squares\n clf = ElasticNet(alpha=0, l1_ratio=1.0)\n # catch warning about alpha=0.\n # this is discouraged but should work.\n ignore_warnings(clf.fit)(X, Y)\n pred = clf.predict(T)\n assert_array_almost_equal(clf.coef_, [1])\n assert_array_almost_equal(pred, [2, 3, 4])\n assert_almost_equal(clf.dual_gap_, 0)\n\n clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)\n clf.fit(X, Y)\n pred = clf.predict(T)\n assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)\n assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)\n assert_almost_equal(clf.dual_gap_, 0)\n\n clf = ElasticNet(alpha=0.5, l1_ratio=0.5)\n clf.fit(X, Y)\n pred = clf.predict(T)\n assert_array_almost_equal(clf.coef_, [0.45454], 3)\n assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)\n assert_almost_equal(clf.dual_gap_, 0)\n\n\ndef test_enet_toy_explicit_sparse_input():\n # Test ElasticNet for various values of alpha and l1_ratio with sparse X\n f = ignore_warnings\n # training samples\n X = sp.lil_matrix((3, 1))\n X[0, 0] = -1\n # X[1, 0] = 0\n X[2, 0] = 1\n Y = [-1, 0, 1] # just a straight line (the identity function)\n\n # test samples\n T = sp.lil_matrix((3, 1))\n T[0, 0] = 2\n T[1, 0] = 3\n T[2, 0] = 4\n\n # this should be the same as lasso\n clf = ElasticNet(alpha=0, l1_ratio=1.0)\n f(clf.fit)(X, Y)\n pred = clf.predict(T)\n assert_array_almost_equal(clf.coef_, [1])\n assert_array_almost_equal(pred, [2, 3, 4])\n assert_almost_equal(clf.dual_gap_, 0)\n\n clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)\n clf.fit(X, Y)\n pred = clf.predict(T)\n assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)\n 
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)\n assert_almost_equal(clf.dual_gap_, 0)\n\n clf = ElasticNet(alpha=0.5, l1_ratio=0.5)\n clf.fit(X, Y)\n pred = clf.predict(T)\n assert_array_almost_equal(clf.coef_, [0.45454], 3)\n assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)\n assert_almost_equal(clf.dual_gap_, 0)\n\n\ndef make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,\n positive=False, n_targets=1):\n random_state = np.random.RandomState(seed)\n\n # build an ill-posed linear regression problem with many noisy features and\n # comparatively few samples\n\n # generate a ground truth model\n w = random_state.randn(n_features, n_targets)\n w[n_informative:] = 0.0 # only the top features are impacting the model\n if positive:\n w = np.abs(w)\n\n X = random_state.randn(n_samples, n_features)\n rnd = random_state.uniform(size=(n_samples, n_features))\n X[rnd > 0.5] = 0.0 # 50% of zeros in input signal\n\n # generate training ground truth labels\n y = np.dot(X, w)\n X = sp.csc_matrix(X)\n if n_targets == 1:\n y = np.ravel(y)\n return X, y\n\n\ndef _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):\n n_samples, n_features, max_iter = 100, 100, 1000\n n_informative = 10\n\n X, y = make_sparse_data(n_samples, n_features, n_informative,\n positive=positive)\n\n X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]\n y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]\n\n s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,\n max_iter=max_iter, tol=1e-7, positive=positive,\n warm_start=True)\n s_clf.fit(X_train, y_train)\n\n assert_almost_equal(s_clf.dual_gap_, 0, 4)\n assert_greater(s_clf.score(X_test, y_test), 0.85)\n\n # check the convergence is the same as the dense version\n d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,\n max_iter=max_iter, tol=1e-7, positive=positive,\n warm_start=True)\n d_clf.fit(X_train.toarray(), y_train)\n\n assert_almost_equal(d_clf.dual_gap_, 0, 4)\n assert_greater(d_clf.score(X_test, y_test), 0.85)\n\n assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)\n assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)\n\n # check that the coefs are sparse\n assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)\n\n\ndef test_sparse_enet_not_as_toy_dataset():\n _test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,\n positive=False)\n _test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,\n positive=False)\n _test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,\n positive=True)\n _test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,\n positive=True)\n\n\ndef test_sparse_lasso_not_as_toy_dataset():\n n_samples = 100\n max_iter = 1000\n n_informative = 10\n X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)\n\n X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]\n y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]\n\n s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)\n s_clf.fit(X_train, y_train)\n assert_almost_equal(s_clf.dual_gap_, 0, 4)\n assert_greater(s_clf.score(X_test, y_test), 0.85)\n\n # check the convergence is the same as the dense version\n d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)\n d_clf.fit(X_train.toarray(), y_train)\n assert_almost_equal(d_clf.dual_gap_, 0, 4)\n assert_greater(d_clf.score(X_test, y_test), 0.85)\n\n # check that the coefs are sparse\n 
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)\n\n\ndef test_enet_multitarget():\n n_targets = 3\n X, y = make_sparse_data(n_targets=n_targets)\n\n estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)\n # XXX: There is a bug when precompute is not None!\n estimator.fit(X, y)\n coef, intercept, dual_gap = (estimator.coef_,\n estimator.intercept_,\n estimator.dual_gap_)\n\n for k in range(n_targets):\n estimator.fit(X, y[:, k])\n assert_array_almost_equal(coef[k, :], estimator.coef_)\n assert_array_almost_equal(intercept[k], estimator.intercept_)\n assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)\n\n\ndef test_path_parameters():\n X, y = make_sparse_data()\n max_iter = 50\n n_alphas = 10\n clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,\n l1_ratio=0.5, fit_intercept=False)\n ignore_warnings(clf.fit)(X, y) # new params\n assert_almost_equal(0.5, clf.l1_ratio)\n assert_equal(n_alphas, clf.n_alphas)\n assert_equal(n_alphas, len(clf.alphas_))\n sparse_mse_path = clf.mse_path_\n ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data\n assert_almost_equal(clf.mse_path_, sparse_mse_path)\n\n\ndef test_same_output_sparse_dense_lasso_and_enet_cv():\n X, y = make_sparse_data(n_samples=40, n_features=10)\n for normalize in [True, False]:\n clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)\n ignore_warnings(clfs.fit)(X, y)\n clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)\n ignore_warnings(clfd.fit)(X.toarray(), y)\n assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)\n assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)\n assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)\n assert_array_almost_equal(clfs.alphas_, clfd.alphas_)\n\n clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)\n ignore_warnings(clfs.fit)(X, y)\n clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)\n ignore_warnings(clfd.fit)(X.toarray(), y)\n assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)\n assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)\n assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)\n assert_array_almost_equal(clfs.alphas_, clfd.alphas_)\n\n\ndef test_same_multiple_output_sparse_dense():\n for normalize in [True, False]:\n l = ElasticNet(normalize=normalize)\n X = [[0, 1, 2, 3, 4],\n [0, 2, 5, 8, 11],\n [9, 10, 11, 12, 13],\n [10, 11, 12, 13, 14]]\n y = [[1, 2, 3, 4, 5],\n [1, 3, 6, 9, 12],\n [10, 11, 12, 13, 14],\n [11, 12, 13, 14, 15]]\n ignore_warnings(l.fit)(X, y)\n sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1)\n predict_dense = l.predict(sample)\n\n l_sp = ElasticNet(normalize=normalize)\n X_sp = sp.coo_matrix(X)\n ignore_warnings(l_sp.fit)(X_sp, y)\n sample_sparse = sp.coo_matrix(sample)\n predict_sparse = l_sp.predict(sample_sparse)\n\n assert_array_almost_equal(predict_sparse, predict_dense)\n\n\ndef test_sparse_enet_coordinate_descent():\n \"\"\"Test that a warning is issued if model does not converge\"\"\"\n clf = Lasso(max_iter=2)\n n_samples = 5\n n_features = 2\n X = sp.csc_matrix((n_samples, n_features)) * 1e50\n y = np.ones(n_samples)\n assert_warns(ConvergenceWarning, clf.fit, X, y)\n",
"#\n# Created by: Pearu Peterson, March 2002\n#\n\"\"\" Test functions for scipy.linalg.matfuncs module\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport math\n\nimport numpy as np\nfrom numpy import array, eye, exp, random\nfrom numpy.linalg import matrix_power\nfrom numpy.testing import (\n assert_allclose, assert_, assert_array_almost_equal, assert_equal,\n assert_array_almost_equal_nulp)\nfrom scipy._lib._numpy_compat import suppress_warnings\n\nfrom scipy.sparse import csc_matrix, SparseEfficiencyWarning\nfrom scipy.sparse.construct import eye as speye\nfrom scipy.sparse.linalg.matfuncs import (expm, _expm,\n ProductOperator, MatrixPowerOperator,\n _onenorm_matrix_power_nnm)\nfrom scipy.sparse.sputils import matrix\nfrom scipy.linalg import logm\nfrom scipy.special import factorial, binom\nimport scipy.sparse\nimport scipy.sparse.linalg\n\n\ndef _burkardt_13_power(n, p):\n \"\"\"\n A helper function for testing matrix functions.\n\n Parameters\n ----------\n n : integer greater than 1\n Order of the square matrix to be returned.\n p : non-negative integer\n Power of the matrix.\n\n Returns\n -------\n out : ndarray representing a square matrix\n A Forsythe matrix of order n, raised to the power p.\n\n \"\"\"\n # Input validation.\n if n != int(n) or n < 2:\n raise ValueError('n must be an integer greater than 1')\n n = int(n)\n if p != int(p) or p < 0:\n raise ValueError('p must be a non-negative integer')\n p = int(p)\n\n # Construct the matrix explicitly.\n a, b = divmod(p, n)\n large = np.power(10.0, -n*a)\n small = large * np.power(10.0, -n)\n return np.diag([large]*(n-b), b) + np.diag([small]*b, b-n)\n\n\ndef test_onenorm_matrix_power_nnm():\n np.random.seed(1234)\n for n in range(1, 5):\n for p in range(5):\n M = np.random.random((n, n))\n Mp = np.linalg.matrix_power(M, p)\n observed = _onenorm_matrix_power_nnm(M, p)\n expected = np.linalg.norm(Mp, 1)\n assert_allclose(observed, expected)\n\n\nclass TestExpM(object):\n def test_zero_ndarray(self):\n a = array([[0.,0],[0,0]])\n assert_array_almost_equal(expm(a),[[1,0],[0,1]])\n\n def test_zero_sparse(self):\n a = csc_matrix([[0.,0],[0,0]])\n assert_array_almost_equal(expm(a).toarray(),[[1,0],[0,1]])\n\n def test_zero_matrix(self):\n a = matrix([[0.,0],[0,0]])\n assert_array_almost_equal(expm(a),[[1,0],[0,1]])\n\n def test_misc_types(self):\n A = expm(np.array([[1]]))\n assert_allclose(expm(((1,),)), A)\n assert_allclose(expm([[1]]), A)\n assert_allclose(expm(matrix([[1]])), A)\n assert_allclose(expm(np.array([[1]])), A)\n assert_allclose(expm(csc_matrix([[1]])).A, A)\n B = expm(np.array([[1j]]))\n assert_allclose(expm(((1j,),)), B)\n assert_allclose(expm([[1j]]), B)\n assert_allclose(expm(matrix([[1j]])), B)\n assert_allclose(expm(csc_matrix([[1j]])).A, B)\n\n def test_bidiagonal_sparse(self):\n A = csc_matrix([\n [1, 3, 0],\n [0, 1, 5],\n [0, 0, 2]], dtype=float)\n e1 = math.exp(1)\n e2 = math.exp(2)\n expected = np.array([\n [e1, 3*e1, 15*(e2 - 2*e1)],\n [0, e1, 5*(e2 - e1)],\n [0, 0, e2]], dtype=float)\n observed = expm(A).toarray()\n assert_array_almost_equal(observed, expected)\n\n def test_padecases_dtype_float(self):\n for dtype in [np.float32, np.float64]:\n for scale in [1e-2, 1e-1, 5e-1, 1, 10]:\n A = scale * eye(3, dtype=dtype)\n observed = expm(A)\n expected = exp(scale) * eye(3, dtype=dtype)\n assert_array_almost_equal_nulp(observed, expected, nulp=100)\n\n def test_padecases_dtype_complex(self):\n for dtype in [np.complex64, np.complex128]:\n for scale in [1e-2, 1e-1, 5e-1, 1, 
10]:\n A = scale * eye(3, dtype=dtype)\n observed = expm(A)\n expected = exp(scale) * eye(3, dtype=dtype)\n assert_array_almost_equal_nulp(observed, expected, nulp=100)\n\n def test_padecases_dtype_sparse_float(self):\n # float32 and complex64 lead to errors in spsolve/UMFpack\n dtype = np.float64\n for scale in [1e-2, 1e-1, 5e-1, 1, 10]:\n a = scale * speye(3, 3, dtype=dtype, format='csc')\n e = exp(scale) * eye(3, dtype=dtype)\n with suppress_warnings() as sup:\n sup.filter(SparseEfficiencyWarning,\n \"Changing the sparsity structure of a csc_matrix is expensive.\")\n exact_onenorm = _expm(a, use_exact_onenorm=True).toarray()\n inexact_onenorm = _expm(a, use_exact_onenorm=False).toarray()\n assert_array_almost_equal_nulp(exact_onenorm, e, nulp=100)\n assert_array_almost_equal_nulp(inexact_onenorm, e, nulp=100)\n\n def test_padecases_dtype_sparse_complex(self):\n # float32 and complex64 lead to errors in spsolve/UMFpack\n dtype = np.complex128\n for scale in [1e-2, 1e-1, 5e-1, 1, 10]:\n a = scale * speye(3, 3, dtype=dtype, format='csc')\n e = exp(scale) * eye(3, dtype=dtype)\n with suppress_warnings() as sup:\n sup.filter(SparseEfficiencyWarning,\n \"Changing the sparsity structure of a csc_matrix is expensive.\")\n assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100)\n\n def test_logm_consistency(self):\n random.seed(1234)\n for dtype in [np.float64, np.complex128]:\n for n in range(1, 10):\n for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]:\n # make logm(A) be of a given scale\n A = (eye(n) + random.rand(n, n) * scale).astype(dtype)\n if np.iscomplexobj(A):\n A = A + 1j * random.rand(n, n) * scale\n assert_array_almost_equal(expm(logm(A)), A)\n\n def test_integer_matrix(self):\n Q = np.array([\n [-3, 1, 1, 1],\n [1, -3, 1, 1],\n [1, 1, -3, 1],\n [1, 1, 1, -3]])\n assert_allclose(expm(Q), expm(1.0 * Q))\n\n def test_integer_matrix_2(self):\n # Check for integer overflows\n Q = np.array([[-500, 500, 0, 0],\n [0, -550, 360, 190],\n [0, 630, -630, 0],\n [0, 0, 0, 0]], dtype=np.int16)\n assert_allclose(expm(Q), expm(1.0 * Q))\n\n Q = csc_matrix(Q)\n assert_allclose(expm(Q).A, expm(1.0 * Q).A)\n\n def test_triangularity_perturbation(self):\n # Experiment (1) of\n # Awad H. Al-Mohy and Nicholas J. 
Higham (2012)\n # Improved Inverse Scaling and Squaring Algorithms\n # for the Matrix Logarithm.\n A = np.array([\n [3.2346e-1, 3e4, 3e4, 3e4],\n [0, 3.0089e-1, 3e4, 3e4],\n [0, 0, 3.221e-1, 3e4],\n [0, 0, 0, 3.0744e-1]],\n dtype=float)\n A_logm = np.array([\n [-1.12867982029050462e+00, 9.61418377142025565e+04,\n -4.52485573953179264e+09, 2.92496941103871812e+14],\n [0.00000000000000000e+00, -1.20101052953082288e+00,\n 9.63469687211303099e+04, -4.68104828911105442e+09],\n [0.00000000000000000e+00, 0.00000000000000000e+00,\n -1.13289322264498393e+00, 9.53249183094775653e+04],\n [0.00000000000000000e+00, 0.00000000000000000e+00,\n 0.00000000000000000e+00, -1.17947533272554850e+00]],\n dtype=float)\n assert_allclose(expm(A_logm), A, rtol=1e-4)\n\n # Perturb the upper triangular matrix by tiny amounts,\n # so that it becomes technically not upper triangular.\n random.seed(1234)\n tiny = 1e-17\n A_logm_perturbed = A_logm.copy()\n A_logm_perturbed[1, 0] = tiny\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"Ill-conditioned.*\")\n A_expm_logm_perturbed = expm(A_logm_perturbed)\n rtol = 1e-4\n atol = 100 * tiny\n assert_(not np.allclose(A_expm_logm_perturbed, A, rtol=rtol, atol=atol))\n\n def test_burkardt_1(self):\n # This matrix is diagonal.\n # The calculation of the matrix exponential is simple.\n #\n # This is the first of a series of matrix exponential tests\n # collected by John Burkardt from the following sources.\n #\n # Alan Laub,\n # Review of \"Linear System Theory\" by Joao Hespanha,\n # SIAM Review,\n # Volume 52, Number 4, December 2010, pages 779--781.\n #\n # Cleve Moler and Charles Van Loan,\n # Nineteen Dubious Ways to Compute the Exponential of a Matrix,\n # Twenty-Five Years Later,\n # SIAM Review,\n # Volume 45, Number 1, March 2003, pages 3--49.\n #\n # Cleve Moler,\n # Cleve's Corner: A Balancing Act for the Matrix Exponential,\n # 23 July 2012.\n #\n # Robert Ward,\n # Numerical computation of the matrix exponential\n # with accuracy estimate,\n # SIAM Journal on Numerical Analysis,\n # Volume 14, Number 4, September 1977, pages 600--610.\n exp1 = np.exp(1)\n exp2 = np.exp(2)\n A = np.array([\n [1, 0],\n [0, 2],\n ], dtype=float)\n desired = np.array([\n [exp1, 0],\n [0, exp2],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_2(self):\n # This matrix is symmetric.\n # The calculation of the matrix exponential is straightforward.\n A = np.array([\n [1, 3],\n [3, 2],\n ], dtype=float)\n desired = np.array([\n [39.322809708033859, 46.166301438885753],\n [46.166301438885768, 54.711576854329110],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_3(self):\n # This example is due to Laub.\n # This matrix is ill-suited for the Taylor series approach.\n # As powers of A are computed, the entries blow up too quickly.\n exp1 = np.exp(1)\n exp39 = np.exp(39)\n A = np.array([\n [0, 1],\n [-39, -40],\n ], dtype=float)\n desired = np.array([\n [\n 39/(38*exp1) - 1/(38*exp39),\n -np.expm1(-38) / (38*exp1)],\n [\n 39*np.expm1(-38) / (38*exp1),\n -1/(38*exp1) + 39/(38*exp39)],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_4(self):\n # This example is due to Moler and Van Loan.\n # The example will cause problems for the series summation approach,\n # as well as for diagonal Pade approximations.\n A = np.array([\n [-49, 24],\n [-64, 31],\n ], dtype=float)\n U = np.array([[3, 1], [4, 2]], dtype=float)\n V = np.array([[1, -1/2], [-2, 3/2]], 
dtype=float)\n w = np.array([-17, -1], dtype=float)\n desired = np.dot(U * np.exp(w), V)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_5(self):\n # This example is due to Moler and Van Loan.\n # This matrix is strictly upper triangular\n # All powers of A are zero beyond some (low) limit.\n # This example will cause problems for Pade approximations.\n A = np.array([\n [0, 6, 0, 0],\n [0, 0, 6, 0],\n [0, 0, 0, 6],\n [0, 0, 0, 0],\n ], dtype=float)\n desired = np.array([\n [1, 6, 18, 36],\n [0, 1, 6, 18],\n [0, 0, 1, 6],\n [0, 0, 0, 1],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_6(self):\n # This example is due to Moler and Van Loan.\n # This matrix does not have a complete set of eigenvectors.\n # That means the eigenvector approach will fail.\n exp1 = np.exp(1)\n A = np.array([\n [1, 1],\n [0, 1],\n ], dtype=float)\n desired = np.array([\n [exp1, exp1],\n [0, exp1],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_7(self):\n # This example is due to Moler and Van Loan.\n # This matrix is very close to example 5.\n # Mathematically, it has a complete set of eigenvectors.\n # Numerically, however, the calculation will be suspect.\n exp1 = np.exp(1)\n eps = np.spacing(1)\n A = np.array([\n [1 + eps, 1],\n [0, 1 - eps],\n ], dtype=float)\n desired = np.array([\n [exp1, exp1],\n [0, exp1],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_8(self):\n # This matrix was an example in Wikipedia.\n exp4 = np.exp(4)\n exp16 = np.exp(16)\n A = np.array([\n [21, 17, 6],\n [-5, -1, -6],\n [4, 4, 16],\n ], dtype=float)\n desired = np.array([\n [13*exp16 - exp4, 13*exp16 - 5*exp4, 2*exp16 - 2*exp4],\n [-9*exp16 + exp4, -9*exp16 + 5*exp4, -2*exp16 + 2*exp4],\n [16*exp16, 16*exp16, 4*exp16],\n ], dtype=float) * 0.25\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_9(self):\n # This matrix is due to the NAG Library.\n # It is an example for function F01ECF.\n A = np.array([\n [1, 2, 2, 2],\n [3, 1, 1, 2],\n [3, 2, 1, 2],\n [3, 3, 3, 1],\n ], dtype=float)\n desired = np.array([\n [740.7038, 610.8500, 542.2743, 549.1753],\n [731.2510, 603.5524, 535.0884, 542.2743],\n [823.7630, 679.4257, 603.5524, 610.8500],\n [998.4355, 823.7630, 731.2510, 740.7038],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_10(self):\n # This is Ward's example #1.\n # It is defective and nonderogatory.\n A = np.array([\n [4, 2, 0],\n [1, 4, 1],\n [1, 1, 4],\n ], dtype=float)\n assert_allclose(sorted(scipy.linalg.eigvals(A)), (3, 3, 6))\n desired = np.array([\n [147.8666224463699, 183.7651386463682, 71.79703239999647],\n [127.7810855231823, 183.7651386463682, 91.88256932318415],\n [127.7810855231824, 163.6796017231806, 111.9681062463718],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_11(self):\n # This is Ward's example #2.\n # It is a symmetric matrix.\n A = np.array([\n [29.87942128909879, 0.7815750847907159, -2.289519314033932],\n [0.7815750847907159, 25.72656945571064, 8.680737820540137],\n [-2.289519314033932, 8.680737820540137, 34.39400925519054],\n ], dtype=float)\n assert_allclose(scipy.linalg.eigvalsh(A), (20, 30, 40))\n desired = np.array([\n [\n 5.496313853692378E+15,\n -1.823188097200898E+16,\n -3.047577080858001E+16],\n [\n -1.823188097200899E+16,\n 6.060522870222108E+16,\n 1.012918429302482E+17],\n [\n -3.047577080858001E+16,\n 
1.012918429302482E+17,\n 1.692944112408493E+17],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_12(self):\n # This is Ward's example #3.\n # Ward's algorithm has difficulty estimating the accuracy\n # of its results.\n A = np.array([\n [-131, 19, 18],\n [-390, 56, 54],\n [-387, 57, 52],\n ], dtype=float)\n assert_allclose(sorted(scipy.linalg.eigvals(A)), (-20, -2, -1))\n desired = np.array([\n [-1.509644158793135, 0.3678794391096522, 0.1353352811751005],\n [-5.632570799891469, 1.471517758499875, 0.4060058435250609],\n [-4.934938326088363, 1.103638317328798, 0.5413411267617766],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_13(self):\n # This is Ward's example #4.\n # This is a version of the Forsythe matrix.\n # The eigenvector problem is badly conditioned.\n # Ward's algorithm has difficulty esimating the accuracy\n # of its results for this problem.\n #\n # Check the construction of one instance of this family of matrices.\n A4_actual = _burkardt_13_power(4, 1)\n A4_desired = [[0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1e-4, 0, 0, 0]]\n assert_allclose(A4_actual, A4_desired)\n # Check the expm for a few instances.\n for n in (2, 3, 4, 10):\n # Approximate expm using Taylor series.\n # This works well for this matrix family\n # because each matrix in the summation,\n # even before dividing by the factorial,\n # is entrywise positive with max entry 10**(-floor(p/n)*n).\n k = max(1, int(np.ceil(16/n)))\n desired = np.zeros((n, n), dtype=float)\n for p in range(n*k):\n Ap = _burkardt_13_power(n, p)\n assert_equal(np.min(Ap), 0)\n assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n))\n desired += Ap / factorial(p)\n actual = expm(_burkardt_13_power(n, 1))\n assert_allclose(actual, desired)\n\n def test_burkardt_14(self):\n # This is Moler's example.\n # This badly scaled matrix caused problems for MATLAB's expm().\n A = np.array([\n [0, 1e-8, 0],\n [-(2e10 + 4e8/6.), -3, 2e10],\n [200./3., 0, -200./3.],\n ], dtype=float)\n desired = np.array([\n [0.446849468283175, 1.54044157383952e-09, 0.462811453558774],\n [-5743067.77947947, -0.0152830038686819, -4526542.71278401],\n [0.447722977849494, 1.54270484519591e-09, 0.463480648837651],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_pascal(self):\n # Test pascal triangle.\n # Nilpotent exponential, used to trigger a failure (gh-8029)\n\n for scale in [1.0, 1e-3, 1e-6]:\n for n in range(120):\n A = np.diag(np.arange(1, n + 1), -1) * scale\n B = expm(A)\n\n sc = scale**np.arange(n, -1, -1)\n if np.any(sc < 1e-300):\n continue\n\n got = B\n expected = binom(np.arange(n + 1)[:,None],\n np.arange(n + 1)[None,:]) * sc[None,:] / sc[:,None]\n err = abs(expected - got).max()\n atol = 1e-13 * abs(expected).max()\n assert_allclose(got, expected, atol=atol)\n\n\nclass TestOperators(object):\n\n def test_product_operator(self):\n random.seed(1234)\n n = 5\n k = 2\n nsamples = 10\n for i in range(nsamples):\n A = np.random.randn(n, n)\n B = np.random.randn(n, n)\n C = np.random.randn(n, n)\n D = np.random.randn(n, k)\n op = ProductOperator(A, B, C)\n assert_allclose(op.matmat(D), A.dot(B).dot(C).dot(D))\n assert_allclose(op.T.matmat(D), (A.dot(B).dot(C)).T.dot(D))\n\n def test_matrix_power_operator(self):\n random.seed(1234)\n n = 5\n k = 2\n p = 3\n nsamples = 10\n for i in range(nsamples):\n A = np.random.randn(n, n)\n B = np.random.randn(n, k)\n op = MatrixPowerOperator(A, p)\n assert_allclose(op.matmat(B), 
matrix_power(A, p).dot(B))\n assert_allclose(op.T.matmat(B), matrix_power(A, p).T.dot(B))\n\n"
] | [
[
"pandas.Categorical",
"pandas.util.testing.assert_produces_warning",
"pandas.api.types.CategoricalDtype",
"pandas.Series"
],
[
"numpy.dot",
"sklearn.utils.testing.assert_array_almost_equal",
"sklearn.utils.testing.assert_almost_equal",
"sklearn.utils.testing.assert_warns",
"sklearn.utils.testing.ignore_warnings",
"scipy.sparse.coo_matrix",
"sklearn.linear_model.coordinate_descent.ElasticNetCV",
"numpy.ravel",
"scipy.sparse.csc_matrix",
"sklearn.linear_model.coordinate_descent.Lasso",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"scipy.sparse.isspmatrix",
"sklearn.utils.testing.assert_equal",
"numpy.abs",
"sklearn.linear_model.coordinate_descent.ElasticNet",
"numpy.ones",
"sklearn.linear_model.coordinate_descent.LassoCV",
"scipy.sparse.lil_matrix"
],
[
"numpy.diag",
"scipy._lib._numpy_compat.suppress_warnings",
"scipy.sparse.linalg.matfuncs._expm",
"numpy.linalg.matrix_power",
"numpy.max",
"scipy.linalg.logm",
"numpy.random.randn",
"numpy.any",
"numpy.iscomplexobj",
"numpy.exp",
"scipy.sparse.linalg.matfuncs.MatrixPowerOperator",
"scipy.sparse.construct.eye",
"scipy.sparse.sputils.matrix",
"numpy.allclose",
"numpy.arange",
"numpy.eye",
"scipy.sparse.linalg.matfuncs.expm",
"scipy.sparse.linalg.matfuncs._onenorm_matrix_power_nnm",
"numpy.ceil",
"scipy.special.factorial",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal",
"scipy.sparse.csc_matrix",
"numpy.spacing",
"numpy.power",
"numpy.testing.assert_array_almost_equal_nulp",
"numpy.min",
"numpy.random.rand",
"numpy.floor",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.random",
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.expm1",
"scipy.sparse.linalg.matfuncs.ProductOperator"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"0.15",
"1.4",
"1.3",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
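The expm battery above closes with the Pascal-triangle case; the identity it checks can be exercised standalone. A minimal sketch, assuming only NumPy and SciPy (the size n and tolerance here are illustrative, not the suite's values):

import numpy as np
from scipy.sparse.linalg import expm
from scipy.special import binom

n = 5
A = np.diag(np.arange(1.0, n + 1), -1)   # nilpotent subdiagonal matrix: A**(n+1) == 0
B = expm(A)                              # exp(A) is the lower-triangular Pascal matrix
expected = binom(np.arange(n + 1)[:, None], np.arange(n + 1)[None, :])
assert np.allclose(B, expected, atol=1e-13 * expected.max())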
nicpittman/tropical_pacific_carbon_export | [
"eacd3e0382616388f418eb21cad859fe7ae0144a"
] | [
"9z_ENSO_spatial_maps.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 1 10:40:28 2020\n@author: Nic Pittman\n\nThis code will reproduce Figure 4 in Pittman et al., 2021. \n\nTrends and pvalues are calculated on the fly and not saved anywhere, however could be done easily. \nregridded data is required for this process\n\nThis results in a slower script but works well. All of the processing occurs in the main function.\nEasy to call modified version of this figure.\n\nProduces mean, trend and pval (Stipples) for the following:\n \n figs/Figure4_Spatial_map_update_'+ratio.name+'.png\n \n air-sea flux\n new production \n difference is calculated here\n SST\n TPCA chlorophyll (regridded) \n carbon (as processed into grams)\n \nRequires: \n datasets/co2/landschutzer_co2/spco2_MPI-SOM_FFN_v2020.nc\n processed/seamask.nc\n processed/flux/fratios.nc\n \n processed/flux/avg_npp_rg_cafe.nc'\n processed/flux/tpca.nc\n datasets/sst/sst.mnmean.nc\n processed/flux/pco2grams.nc\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport matplotlib.pyplot as plt\nfrom carbon_math import *\nfrom mpl_toolkits.basemap import Basemap\nfrom scipy.stats import linregress\n\n \ndef plot_basemap():\n m = Basemap(llcrnrlon=120.,llcrnrlat=-15,urcrnrlon=290,urcrnrlat=15.01,\n resolution='l',projection='merc',fix_aspect=False)\n m.drawcoastlines()\n m.fillcontinents()\n # draw parallels # labels = [left,right,top,bottom]\n m.drawparallels(np.arange(-20,21,10),labels=[1,0,1,1],fontsize=12,latmax=20)\n m.drawmeridians(np.arange(-180,180,30),labels=[0,0,0,1],fontsize=12)\n return m\n\n\n\ndef plot_basemap_row(fig,axn,hovmol,units,title,units_tr,levs=None,levs_trend=None,trend_conversion=None,sb1=7,sb2=3,cmap='viridis',cmaptr='RdBu_r',wu=None,wv=None):\n '''\n Create a plotting function to make it repeatable and nicer\n colormaps should either be viridis or RdBu_r\n axis (number) will be 1,3,5,7 (plots both avg and trend at once)\n \n Unfortunately this function does the processing of mean, trends and pvals on the fly.\n Could save these if needed, but not provided here. 
\n '''\n fr=0.03\n fs=12\n ms=10\n startday=np.datetime64('2000-01-01')\n \n if title.endswith('pCO2t'):\n endday=np.datetime64('2016-12-01') \n print(title)\n elif title.endswith('chlorophyll'):\n endday=np.datetime64('2017-12-01')\n else:\n endday=np.datetime64('2020-01-01') \n \n ax1=fig.add_subplot(sb1,sb2,axn)\n m=plot_basemap()\n\n lo,la=np.meshgrid(hovmol.lon.values,hovmol.lat.values)\n lo1,la1=m(lo,la)\n \n if type(levs_trend)==type(None):\n f=m.contourf(lo1,la1,hovmol.mean(dim='time'),cmap=cmap) #11 colors\n #Quick anti-aliasing fix as per: https://stackoverflow.com/questions/15822159/aliasing-when-saving-matplotlib-filled-contour-plot-to-pdf-or-eps\n for c in f.collections:\n c.set_edgecolor(\"face\")\n else:\n if title=='TPCA Chlorophyll':\n f=m.contourf(lo1,la1,hovmol.mean(dim='time'),extend='max',cmap=cmap,levels=levs) #11 colors\n #Quick anti-aliasing fix as per: https://stackoverflow.com/questions/15822159/aliasing-when-saving-matplotlib-filled-contour-plot-to-pdf-or-eps\n for c in f.collections:\n c.set_edgecolor(\"face\")\n else:\n f=m.contourf(lo1,la1,hovmol.mean(dim='time'),cmap=cmap,levels=levs) #11 colors\n #Quick anti-aliasing fix as per: https://stackoverflow.com/questions/15822159/aliasing-when-saving-matplotlib-filled-contour-plot-to-pdf-or-eps\n for c in f.collections:\n c.set_edgecolor(\"face\")\n ax1.axhline(0,c='k',linestyle=':')\n\n moorings=[165,190,205,220,235,250]\n for x in moorings:\n x1,y1=m(x,0)\n ax1.plot(x1,y1,marker='x',c='k',markersize=ms)\n \n if title=='SST':\n \n lev=28.5#29.2 #rather than 28.5\n early_sst=hovmol.sel(time=slice('1997-01-01','2002-01-01')).mean(dim='time')#.where(co2.seamask==1)\n late_sst=hovmol.sel(time=slice('2015-01-01','2020-01-01')).mean(dim='time')#.where(co2.seamask==1)\n \n m.contour(lo1,la1,early_sst,levels=[lev],linestyles='dotted',colors='k')\n \n m.contour(lo1,la1,late_sst,levels=[lev],linestyles='solid',colors='k')\n m.contour(lo1,la1,hovmol.mean(dim='time'),levels=[25],linestyles='dashed',colors='k')\n \n \n #wu['lon'],wu['lat']=m(lo,la,wu.lon.values,wu.lat.values)\n #No windspeed vectors now\n #if title=='Wind speed':\n # skip=(slice(None,None,4),slice(None,None,4)) #2 for NCEP 2\n # m.quiver(lo1[skip],la1[skip],wu.mean(dim='time')[skip]/2,wv.mean(dim='time')[skip]/2,scale=90,headwidth=4.5)#,minshaft=2)\n\n\n cb=plt.colorbar(f,ax=ax1,fraction=fr)\n cb.set_label(units,fontsize=fs)\n cb.ax.tick_params(labelsize=fs-1)\n ax1.set_title(chr(ord('`')+axn)+') Average: '+title,fontsize=fs)\n ax1.tick_params(labelsize=fs)\n\n #Trends\n hovmol=hovmol.where(hovmol!=-0.9999,np.nan)\n hm=hovmol.interpolate_na(dim='time').sel(time=slice(startday,endday))\n months=hm.time\n \n dt_dates=pd.to_numeric(months.values.astype('datetime64[D]'))\n num_dates=dt_dates\n hm['time']=num_dates\n\n \n \n\n\n#Functions above make plotting easy.\n# # Code begins\n \n# %%Load data in\n \n#landsch_fp='datasets/co2/landschutzer_co2/spco2_MPI_SOM-FFN_v2018.nc'\nlandsch_fp='datasets/co2/landschutzer_co2/spco2_MPI-SOM_FFN_v2020.nc'\n\n\n\nseamask=xr.open_dataset('processed/seamask.nc') #Because 2020 version doesn't have it.\nseamask= seamask.assign_coords(lon=(seamask.lon % 360)).roll(lon=(seamask.dims['lon']),roll_coords=False).sortby('lon')\t\n\n#It would be preferable to use the 2020 version,\n# landsch_fp='datasets/co2/landschutzer_co2/spco2_MPI-SOM_FFN_v2020.nc'\n#However it doesn't include seamask so we are going to need both.... 
(Unless I save the seamask)\nlandschutzer=xr.open_dataset(landsch_fp)\nlandschutzer= landschutzer.assign_coords(lon=(landschutzer.lon % 360)).roll(lon=(landschutzer.dims['lon']),roll_coords=False).sortby('lon')\t\t#EPIC 1 line fix for the dateline problem.\nland_pac=landschutzer.sel(lon=slice(120,290),lat=slice(-20,20))\nland_pac['time']=land_pac.time.astype('datetime64[M]')\nland_pac_all=landschutzer.sel(lon=slice(120,290),lat=slice(-20,20))\n\nland_pac=land_pac.fgco2_smoothed\n\natmco2=land_pac_all.atm_co2\ndco2=land_pac_all.dco2\npco2=land_pac_all.spco2_smoothed\nkw=land_pac_all.kw\n\nf_ratios=xr.open_mfdataset('processed/flux/fratios.nc')\nratio=f_ratios.laws2011a#laws2000#laws2000,laws2011a,laws2011b,henson2011\n\nnpp1=xr.open_dataset('processed/flux/avg_npp_rg_cafe.nc')\navg_npp=(npp1.avg_npp/1000)*ratio\n\nland=moles_to_carbon(land_pac)/365 #LANDSCHUTZ\n\n\ndiff=land-avg_npp\ndiff1=diff.where((diff<0.1)|(diff<-0.1),np.nan)\n\n\n# Need to combine the chlorophyll products, takes a bit of memory.\nchl=xr.open_dataset('processed/flux/tpca.nc').tpca#'sw_month.nc')\n\n#mod=xr.open_dataset('datasets/tpca/mod_month.nc')\nchl['time']=chl.time.astype('datetime64[M]')\n#mod['time']=mod.time.astype('datetime64[M]')\n#tpca=sw\n#tpca=tpca.merge(mod)\n#chl = tpca.to_array(dim='tpca').mean('tpca')\n\n#SST\nsst = xr.open_dataset('datasets/sst/sst.mnmean.nc')\nsst= sst.assign_coords(lon=(sst.lon % 360)).roll(lon=(sst.dims['lon']),roll_coords=False).sortby('lon')\t\t#EPIC 1 line fix for the dateline problem.\nsst=sst.sel(lon=slice(120,290),lat=slice(20,-20)).sst\nsst=sst.where(seamask.seamask==1)\n\npCO2 = xr.open_dataarray('processed/flux/pco2grams.nc') #_norm\nintegratedpCO2 = (pCO2*12*50)\n\n#wu=xr.open_dataset('datasets/uwnd.mon.mean.nc').sel(level=1000,lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).uwnd\n#wv=xr.open_dataset('datasets/vwnd.mon.mean.nc').sel(level=1000,lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).vwnd\nwu=xr.open_dataset('datasets/uwnd.10m.mon.mean.nc').sel(level=10,lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).uwnd\nwv=xr.open_dataset('datasets/vwnd.10m.mon.mean.nc').sel(level=10,lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).vwnd\ndco2['time']=dco2.time.astype('datetime64[M]')\n\nws=np.sqrt((wu**2)+(wv**2))\n\n\n\nprecip= xr.open_dataset('datasets/precip.mon.mean.enhanced.nc').sel(lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).precip\n\n# # THIS NEEDS TO BE RUN ONCE BUT CAN be memory intensive\n\n# w_ccmp_a=xr.open_mfdataset('datasets/ws_ccmp/*.nc') #Downloaded manually\n# w_ccmp_a['time']=w_ccmp_a.time.astype('datetime64[M]')\n# w_ccmp_a=w_ccmp_a.sel(latitude=slice(-20,20))\n\n# w_ccmp_b=xr.open_mfdataset('datasets/CCMP_winds.nc') #Bulk ErDap download\n# dt=w_ccmp_b.indexes['time'].to_datetimeindex()\n# w_ccmp_b['time']=dt\n\n# w_ccmp=xr.merge([w_ccmp_b,w_ccmp_a])\n\n\n# w_ccmp=w_ccmp.sel(longitude=slice(120,290),latitude=slice(-20,20))\n# ws_ccmp=np.sqrt((w_ccmp.uwnd**2)+(w_ccmp.vwnd**2))\n# ws_ccmp=ws_ccmp.rename({'latitude':'lat','longitude':'lon'})\n# try:\n# ws_ccmp.to_netcdf('datasets/CCMP_windspeed.nc')\n# print('saved')\n# except:\n# pass\n\nws_ccmp=xr.open_dataarray('datasets/CCMP_windspeed.nc')\n#ws_ccmp=xr.open_dataarray('processed/CCMP_ws_1deg.nc')\n\n# %% Prepare Figure 
\n\n\nlanina=pd.read_csv('processed/indexes/la_nina_events.csv')\ncp_nino=pd.read_csv('processed/indexes/cp_events.csv')\nep_nino=pd.read_csv('processed/indexes/ep_events.csv')\n\nfp='processed/combined_dataset/month_data_exports.nc'\ninfo=xr.open_mfdataset(fp).sel(Mooring=195).to_dataframe()\n\n\n#Process EP, CP and Nino events.\nnina=pd.DataFrame()\nep=pd.DataFrame()\ncp=pd.DataFrame()\nfor i in lanina.iterrows(): nina=nina.append(info[slice(i[1].start,i[1].end)])\nfor i in ep_nino.iterrows(): ep=ep.append(info[slice(i[1].start,i[1].end)])\nfor i in cp_nino.iterrows(): cp=cp.append(info[slice(i[1].start,i[1].end)])\nnina_dates=nina.index\nep_dates=ep.index[4:]\ncp_dates=cp.index\n#all_dates=chl.time\nall_dates=info.index[36:] #2000 - 2020\n\n\nfig=plt.figure(figsize=(19*2/2.54,23*2/2.54))#(figsize=(30,15))\nsb1=7\nsb2=3\n\n\n#%% EP\n\nplot_basemap_row(fig,axn=1,\n hovmol=sst.sel(time=ep_dates),\n units='Degrees C',\n title='SST',\n units_tr='Degrees C year$^{-1}$',\n levs=np.arange(20,32,1),\n levs_trend=np.arange(-0.06,0.07,0.01),\n #trend_conversion=1000,\n cmap='viridis')\n\n\nplot_basemap_row(fig,axn=4,\n hovmol=ws_ccmp.sel(time=ep_dates),\n units='m s$^{-1}$',\n title='Wind speed',\n units_tr='m s$^{-1}$ year$^{-1}$', \n levs=np.arange(0,11,1),\n levs_trend=np.arange(-0.1,0.125,0.025),\n \n #levs_trend=np.arange(-0.15,0.175,0.025),\n #trend_conversion=1000,\n cmap='viridis',\n wu=wu,wv=wv)\n\nplot_basemap_row(fig,axn=7,\n hovmol=chl.sel(time=ep_dates),\n units='mg chl m$^{-3}$ day$^{-1}$',\n title='TPCA chlorophyll',\n units_tr='ug chl m$^{-3}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,0.65,0.05),\n \n levs_trend=np.arange(-4,4.1,1),\n trend_conversion=1000,\n cmap='viridis')\n\n\nplot_basemap_row(fig,axn=10,\n hovmol=avg_npp.sel(lat=slice(-15,15),time=ep_dates),\n units='gC m$^{-2}$ day$^{-1}$',\n title='New production',\n units_tr='mgC m$^{-2}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,0.26,0.025),\n levs_trend=np.arange(-2,2.1,0.25),\n trend_conversion=1000,\n cmap='viridis')\n\nplot_basemap_row(fig,axn=13,\n hovmol=precip.sel(time=ep_dates),\n units='mm day$^{-1}$',\n title='Precipitation',\n units_tr='mm day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,13,1),\n levs_trend=np.arange(-0.08,0.085,0.005),\n #trend_conversion=1000,\n cmap='viridis')\n\n\n#Delta pCO2\nh=plot_basemap_row(fig,axn=16,\n hovmol=dco2.sel(time=ep_dates),#npp1.avg_npp,#dco2,#integratedpCO2,#monthlyPCO2*1000,\n units='μatm',\n title='\\u0394pCO$_{2}$',#'pCO21',\n units_tr='μatm year$^{-1}$',\n levs=np.arange(-15,121,10),#(200,1200,10),#(5.5,9.5,0.5),\n levs_trend=np.arange(-2.5,2.6,0.1),\n trend_conversion=1,#1000,\n cmap='viridis',\n cmaptr='RdBu_r')#'Reds')\n\nplot_basemap_row(fig,axn=19,\n hovmol=land.sel(time=ep_dates),\n units='gC m$_{-2}$ day$^{-1}$',\n title='Air-sea CO$_{2}$ flux',\n units_tr='mgC m$^{2}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(-0.14,0.15,0.02),\n levs_trend=np.arange(-2,2.1,0.5),\n trend_conversion=1000,\n cmap='RdBu_r')\n\n\n# %% CP\n\nplot_basemap_row(fig,axn=2,\n hovmol=sst.sel(time=cp_dates),\n units='Degrees C',\n title='SST',\n units_tr='Degrees C year$^{-1}$',\n levs=np.arange(20,32,1),\n levs_trend=np.arange(-0.06,0.07,0.01),\n #trend_conversion=1000,\n cmap='viridis')\n\n\nplot_basemap_row(fig,axn=5,\n hovmol=ws_ccmp.sel(time=cp_dates[:-7]),\n units='m s$^{-1}$',\n title='Wind speed',\n units_tr='m s$^{-1}$ year$^{-1}$', \n levs=np.arange(0,11,1),\n levs_trend=np.arange(-0.1,0.125,0.025),\n \n #levs_trend=np.arange(-0.15,0.175,0.025),\n #trend_conversion=1000,\n 
cmap='viridis',\n wu=wu,wv=wv)\n\nplot_basemap_row(fig,axn=8,\n hovmol=chl.sel(time=cp_dates[:-5]),\n units='mg chl m$^{-3}$ day$^{-1}$',\n title='TPCA chlorophyll',\n units_tr='ug chl m$^{-3}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,0.65,0.05),\n \n levs_trend=np.arange(-4,4.1,1),\n trend_conversion=1000,\n cmap='viridis')\n\n\nplot_basemap_row(fig,axn=11,\n hovmol=avg_npp.sel(lat=slice(-15,15),time=cp_dates),\n units='gC m$^{-2}$ day$^{-1}$',\n title='New production',\n units_tr='mgC m$^{-2}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,0.26,0.025),\n levs_trend=np.arange(-2,2.1,0.25),\n trend_conversion=1000,\n cmap='viridis')\n\nplot_basemap_row(fig,axn=14,\n hovmol=precip.sel(time=cp_dates),\n units='mm day$^{-1}$',\n title='Precipitation',\n units_tr='mm day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,13,1),\n levs_trend=np.arange(-0.08,0.085,0.005),\n #trend_conversion=1000,\n cmap='viridis')\n\n\n#Delta pCO2\nh=plot_basemap_row(fig,axn=17,\n hovmol=dco2.sel(time=cp_dates),#npp1.avg_npp,#dco2,#integratedpCO2,#monthlyPCO2*1000,\n units='μatm',\n title='\\u0394pCO$_{2}$',#'pCO21',\n units_tr='μatm year$^{-1}$',\n levs=np.arange(-15,121,10),#(200,1200,10),#(5.5,9.5,0.5),\n levs_trend=np.arange(-2.5,2.6,0.1),\n trend_conversion=1,#1000,\n cmap='viridis',\n cmaptr='RdBu_r')#'Reds')\n\nplot_basemap_row(fig,axn=20,\n hovmol=land.sel(time=cp_dates),\n units='gC m$_{-2}$ day$^{-1}$',\n title='Air-sea CO$_{2}$ flux',\n units_tr='mgC m$^{2}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(-0.14,0.15,0.02),\n levs_trend=np.arange(-2,2.1,0.5),\n trend_conversion=1000,\n cmap='RdBu_r')\n\n\n#%% NINA\n\n\nplot_basemap_row(fig,axn=3,\n hovmol=sst.sel(time=nina_dates),\n units='Degrees C',\n title='SST',\n units_tr='Degrees C year$^{-1}$',\n levs=np.arange(20,32,1),\n levs_trend=np.arange(-0.06,0.07,0.01),\n #trend_conversion=1000,\n cmap='viridis')\n\n\nplot_basemap_row(fig,axn=6,\n hovmol=ws_ccmp.sel(time=nina_dates),\n units='m s$^{-1}$',\n title='Wind speed',\n units_tr='m s$^{-1}$ year$^{-1}$', \n levs=np.arange(0,11,1),\n levs_trend=np.arange(-0.1,0.125,0.025),\n \n #levs_trend=np.arange(-0.15,0.175,0.025),\n #trend_conversion=1000,\n cmap='viridis',\n wu=wu,wv=wv)\n\nplot_basemap_row(fig,axn=9,\n hovmol=chl.sel(time=nina_dates),\n units='mg chl m$^{-3}$ day$^{-1}$',\n title='TPCA chlorophyll',\n units_tr='ug chl m$^{-3}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,0.65,0.05),\n \n levs_trend=np.arange(-4,4.1,1),\n trend_conversion=1000,\n cmap='viridis')\n\n\nplot_basemap_row(fig,axn=12,\n hovmol=avg_npp.sel(lat=slice(-15,15),time=nina_dates),\n units='gC m$^{-2}$ day$^{-1}$',\n title='New production',\n units_tr='mgC m$^{-2}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,0.26,0.025),\n levs_trend=np.arange(-2,2.1,0.25),\n trend_conversion=1000,\n cmap='viridis')\n\nplot_basemap_row(fig,axn=15,\n hovmol=precip.sel(time=nina_dates),\n units='mm day$^{-1}$',\n title='Precipitation',\n units_tr='mm day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,13,1),\n levs_trend=np.arange(-0.08,0.085,0.005),\n #trend_conversion=1000,\n cmap='viridis')\n\n\n#Delta pCO2\nh=plot_basemap_row(fig,axn=18,\n hovmol=dco2.sel(time=nina_dates),#npp1.avg_npp,#dco2,#integratedpCO2,#monthlyPCO2*1000,\n units='μatm',\n title='\\u0394pCO$_{2}$',#'pCO21',\n units_tr='μatm year$^{-1}$',\n levs=np.arange(-15,121,10),#(200,1200,10),#(5.5,9.5,0.5),\n levs_trend=np.arange(-2.5,2.6,0.1),\n trend_conversion=1,#1000,\n cmap='viridis',\n cmaptr='RdBu_r')#'Reds')\n\nplot_basemap_row(fig,axn=21,\n hovmol=land.sel(time=nina_dates),\n units='gC 
m$_{-2}$ day$^{-1}$',\n title='Air-sea CO$_{2}$ flux',\n units_tr='mgC m$^{2}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(-0.14,0.15,0.02),\n levs_trend=np.arange(-2,2.1,0.5),\n trend_conversion=1000,\n cmap='RdBu_r')\n\n\n\n\n\n\nplt.tight_layout()\n# plt.savefig('figs/Figure3_Spatial_map_'+ratio.name+etype+'.png',dpi=100)\n# plt.savefig('figs/vector/Figure3_Spatial_map_'+ratio.name+etype+'.eps')\n# plt.savefig('figs/vector/Figure3_Spatial_map_'+ratio.name+etype+'.pdf')\n\n# try:\n# plt.savefig('figs/Figure3_Spatial_map_'+ratio.name+'.jpeg',dpi=300)\n# except:\n# pass\n# plt.show()\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"numpy.sqrt",
"numpy.arange",
"pandas.DataFrame",
"numpy.datetime64",
"matplotlib.pyplot.colorbar",
"numpy.meshgrid",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
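One step worth isolating from the script above is the dateline fix it applies to every dataset. A toy sketch with invented coordinates rather than the Landschutzer or SST files (the script additionally rolls the underlying array; for an in-memory example, sortby alone realigns data and coordinates):

import numpy as np
import xarray as xr

ds = xr.Dataset({"sst": ("lon", np.arange(8.0))},
                coords={"lon": np.arange(-180, 180, 45)})
ds = ds.assign_coords(lon=(ds.lon % 360)).sortby("lon")  # -180..135 mapped onto 0..315
print(ds.lon.values)                     # [  0  45  90 135 180 225 270 315]
pacific = ds.sel(lon=slice(120, 290))    # the Pacific box becomes one contiguous slice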
slowy07/medical-BCDU | [
"dab1ddcacbe093b78e6830d52db2a4e6fabc3d52"
] | [
"lungSegmentation/RFunction.py"
] | [
"from __future__ import division\nimport numpy as np\nfrom scipy.ndimage.morphology import binary_erosion, binary_fill_holes\n\ndef hu_to_grayscale(volume):\n volume = np.clip(volume, -512, 512)\n mxmal = np.max(volume)\n mnval = np.min(volume)\n im_volume = (volume - mnval) / max(mxval - mnval, 1e-3)\n im_volume = im_volume\n \n return im_volume* 255\n\ndef get_mask_lung(vol):\n vol_im = np.where(vol > 0, 1, 0)\n shp = vol.shape\n around_img = np.zeros((shp[0], shp[1], shp[2]), dtype = np.float32)\n for idx in range(shp[0]):\n around_lung[idx, :, :] = binary_erosion(vol_im[idx], structure = np.ones((15, 15))).astype(vol_im.dtype)\n\n return around_lung\n\ndef get_mask(segmentation):\n # initialize ouput to zero\n shp = segmentation.shape\n lung = np.zeros((shp[0], shp[1], shp[2]), dtype = np.float32)\n\n lung[np.equal(segmentation, 255)] = 255\n \n return lung\n\ndef get_FOV(around_lung, lung):\n FOV = np.where((around_lung + lung) > 0, 1, 0)\n for idx in range(FOV.shape[0]):\n FOV[idx, :, :] = binary_fill_holes(FOV[idx, :, :], structure = np.ones((5, 5))).astype(FOV.dtype)\n \n return FOV\n\ndef return_axials(vol, seg):\n vol = vol.get_data()\n seg = seg.get_data()\n seg = seg.astype(np.int32)\n\n # convert to visual format\n vol_ims = hu_to_grayscale(vol_ims)\n lung = get_mask(seg)\n around_lung = get_mask_lung(vol_ims)\n FOV = get_FOV(around_lung, lung)\n \n around_lung = np.where((FOV - lung) > 0, 1, 0)\n \n return vol_ims, lung, around_lung, FOV"
] | [
[
"numpy.min",
"numpy.clip",
"numpy.ones",
"numpy.max",
"numpy.equal",
"numpy.where",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
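A hypothetical smoke test for the helpers above, run on random arrays instead of NIfTI volumes (the shapes and the 0.9 threshold are invented for the example):

import numpy as np
from RFunction import hu_to_grayscale, get_mask   # assumes lungSegmentation/ is on sys.path

vol = np.random.randint(-1024, 1024, size=(4, 64, 64))    # fake HU volume
gray = hu_to_grayscale(vol)                               # clipped and rescaled to 0..255
assert gray.min() >= 0 and gray.max() <= 255
seg = np.where(np.random.rand(4, 64, 64) > 0.9, 255, 0)   # fake segmentation labels
lung = get_mask(seg)
assert set(np.unique(lung)) <= {0.0, 255.0}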
jlsuarezdiaz/pyDML-Stats | [
"495de64dbcda73ce20d8e916bf5e5077a8dae98a"
] | [
"scripts/utils/toy_datasets.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 26 12:25:25 2018\n\nToy datasets.\n\n@author: jlsuarezdiaz\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom six.moves import xrange\nfrom sklearn.preprocessing import LabelEncoder\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import(\n load_iris, load_digits)\n\ndef toy_plot(X,y):\n f, ax = plt.subplots(figsize=(12,9))\n \n plt.axis('equal')\n plt.scatter(X[:,0],X[:,1],c=y,cmap=\"rainbow\",label=y)\n \n #cmap = plt.get_cmap('rainbow')\n #cc = np.unique(y)\n #cn = len(cc)\n #for i,c in enumerate(cc):\n # print(i,c)\n # ind = np.where(y == c)[0]\n # print(ind)\n # XX = X[ind]\n # print(cmap(i/(cn+1)))\n # ax.scatter(XX[:,0],XX[:,1],c=cmap(i/(cn+1)),label=c)\n #plt.legend()\n \n plt.show()\n return plt\n\ndef circular_toy_dataset(rads = [1,2], samples = [200,200], noise = [0.2,0.2], seed = None):\n if seed is not None:\n np.random.seed(seed)\n \n n = sum(samples)\n d = 2\n X = np.empty([n,d])\n y = np.empty([n])\n le = LabelEncoder()\n le.fit(rads)\n \n acum = 0\n for j,s in enumerate(samples):\n for i in xrange(s):\n ns1 = noise[j]*np.random.randn()\n ns2 = noise[j]*np.random.randn()\n x1 = (rads[j]+ns1)*np.cos(2*np.pi*i/s)\n x2 = (rads[j]+ns2)*np.sin(2*np.pi*i/s)\n \n X[acum+i,:] = [x1,x2]\n y[acum+i] = rads[j]\n \n acum += s\n y = le.transform(y)\n \n return X,y\n\ndef hiperplane_toy_dataset(ws = [[1,1],[1,-1]],bs = [[0,0],[0,0]],nsamples=800,xrange=[-1,1],yrange=[-1,1], noise = 0.1,seed = None):\n if seed is not None:\n np.random.seed(seed)\n \n n=nsamples\n d=2\n X = np.random.rand(n,d)\n y = np.zeros([n])\n yy = np.empty([n,len(ws)])\n \n X[:,0] = (xrange[1]-xrange[0])*X[:,0]+xrange[0]\n X[:,1] = (yrange[1]-yrange[0])*X[:,1]+yrange[0]\n \n for j, (w, b) in enumerate(zip(ws,bs)):\n w = np.matrix(w)\n b = np.matrix(b)\n ns = noise*np.random.randn(n,2)\n yy[:,j] = np.sign(((X+ns)-b).dot(w.T)).reshape([n])\n \n yy[yy==-1]=0\n yy = yy.astype(int)\n\n for i in range(n):\n for j, u in enumerate(yy[i,:]):\n y[i] += (u << j)\n \n return X,y\n \ndef iris2d_toy_dataset(dims=[0,2]):\n data=load_iris() # IRIS\n X=data['data']\n X=X[:,dims]\n y=data['target']\n return X,y\n\ndef balls_toy_dataset(centers = [[-2,-2],[0,0],[2,2],[2,-2],[-2,2]],rads = [1.4,1.4,1.4,1.4,1.4],samples=[200,200,200,200,200],noise = [0.3,0.3,0.3,0.3,0.3],seed=None):\n if seed is not None:\n np.random.seed(seed)\n \n n = sum(samples)\n d=2\n \n X=np.empty([n,d])\n y=np.empty([n])\n \n acum=0\n for j, s in enumerate(samples):\n rs = rads[j]*np.random.rand(s)\n angs = 2*np.pi*np.random.rand(s)\n ns = noise[j]*np.random.rand(s)\n c = np.array(centers[j])\n \n for i in xrange(s):\n X[acum+i,:] = c +ns[i] + rs[i]*np.array([np.cos(angs[i]),np.sin(angs[i])])\n y[acum+i]=j\n \n acum += s\n \n return X,y\n\ndef simetria_hor(A):\n nrow, ncol= A.shape\n A = np.abs(A-A[:,::-1]) # Diferencia con la imagen simétrica\n return np.mean(A) # Media de las diferencias (grado de simetría)\n\ndef simetria_ver(A):\n nrow, ncol= A.shape\n A = np.abs(A-A[::-1,:]) # Diferencia con la imagen simétrica\n return np.mean(A) # Media de las diferencias (grado de simetría)\n\ndef digits_toy_dataset(dims=[0,2],numbers=[0,1,2,3,4,5,6,7,8,9]):\n data=load_digits()\n XX = data['data']\n y = data['target']\n nn,dd = XX.shape\n XX = XX.reshape([nn,8,8])\n\n X = np.empty([nn,3])\n for i in xrange(nn):\n X[i,0] = simetria_hor(XX[i,:,:])\n X[i,1] = simetria_ver(XX[i,:,:])\n X[i,2] = np.mean(XX[i,:])\n \n selected = np.where(np.isin(y,numbers))[0]\n \n 
return X[selected,:][:,dims],y[selected] \n\ndef single_toy_dataset(samples=8, classes = 3, seed=None):\n    if seed is not None:\n        np.random.seed(seed)  # honor the seed argument, matching the other generators\n    X = np.empty([samples,2])\n    y = np.empty([samples])\n    for i in xrange(samples):\n        c = np.random.randint(0,classes)\n        x = np.random.rand(1,2)\n        X[i,:]=x\n        y[i]=c\n    \n    return X,y\n    "
] | [
[
"numpy.matrix",
"numpy.mean",
"numpy.random.randn",
"sklearn.preprocessing.LabelEncoder",
"numpy.random.randint",
"numpy.sin",
"matplotlib.pyplot.axis",
"numpy.zeros",
"numpy.isin",
"sklearn.datasets.load_iris",
"numpy.random.rand",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.scatter",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"numpy.cos",
"sklearn.datasets.load_digits",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
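A short usage sketch for the generators above, assuming toy_datasets.py is importable as-is (the seed and sizes are arbitrary choices for the example):

from toy_datasets import circular_toy_dataset, toy_plot

X, y = circular_toy_dataset(rads=[1, 2], samples=[200, 200], noise=[0.2, 0.2], seed=28)
toy_plot(X, y)   # two noisy concentric rings, one color per radius class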
Ayushk4/tsat | [
"07f9535157e45c4b27dae7d73d199fef7fb9d37a",
"07f9535157e45c4b27dae7d73d199fef7fb9d37a"
] | [
"common/metrics/basic_metrics.py",
"ac/data/build.py"
] | [
"#----------------------------------------\n#--------- Torch Related Imports --------\n#----------------------------------------\nimport torch\nimport torch.distributed as distributed\n\n#----------------------------------------\n#--------- Import Wandb Here ------------\n#----------------------------------------\nimport wandb\n\n\nclass TrainAccuracyMetric():\n\n def __init__(self, initial_value, allreduce=False, **kwargs):\n\n self.current_value = initial_value\n self.iterations = 1\n self.allreduce = allreduce\n\n def update(self, new_value):\n\n self.current_value = (self.current_value - (self.current_value-new_value)/(self.iterations + 1))\n\n # If all reduce, get the number of GPUs\n if self.allreduce:\n gpus = torch.tensor(1.0).cuda()\n\n # convert to tensor\n cv = torch.tensor(self.current_value).cuda()\n\n distributed.all_reduce(cv, op=distributed.ReduceOp.SUM)\n distributed.all_reduce(gpus, op=distributed.ReduceOp.SUM)\n\n self.current_value = cv.item()/gpus.item()\n\n self.iterations += 1\n\n def wandb_log(self, metric_name, step):\n\n wandb.log({metric_name: self.current_value}, step=step)\n\nclass TrainLossMetric():\n\n def __init__(self, initial_value, **kwargs):\n\n self.current_value = initial_value\n\n def update(self, new_value):\n\n self.current_value = new_value\n\n def wandb_log(self, metric_name, step):\n\n wandb.log({metric_name: self.current_value}, step=step)\n\nclass ValAccuracyMetric():\n\n def __init__(self, initial_value, allreduce=False, **kwargs):\n\n self.current_value = initial_value\n self.best_value = initial_value\n self.updated_best_val = True\n self.allreduce = allreduce\n\n def update(self, new_value):\n\n self.current_value = new_value\n\n # If all reduce, get the number of GPUs\n if self.allreduce:\n gpus = torch.tensor(1.0).cuda()\n\n # convert to tensor\n cv = torch.tensor(self.current_value).cuda()\n\n distributed.all_reduce(cv, op=distributed.ReduceOp.SUM)\n distributed.all_reduce(gpus, op=distributed.ReduceOp.SUM)\n\n self.current_value = cv.item()/gpus.item()\n\n if self.current_value > self.best_value:\n self.best_value = self.current_value\n self.updated_best_val = True\n else:\n self.updated_best_val = False\n\n def wandb_log(self, metric_name, step):\n\n wandb.log({f'current_{metric_name}': self.current_value, f'best_{metric_name}': self.best_value}, step=step)\n",
"#----------------------------------------\n#--------- Torch related imports --------\n#----------------------------------------\nimport torch\n\n#----------------------------------------\n#--------- Funcs and Classes for Datasets\n#----------------------------------------\nfrom .datasets.kinetics import Kinetics\nfrom .collate import collate_fn\n\nDATASET_CATALOGS = {'kinetics': Kinetics}\n\ndef build_dataset(dataset_name, *args, **kwargs):\n assert dataset_name in DATASET_CATALOGS, \"dataset not in catalogs\"\n return DATASET_CATALOGS[dataset_name](*args, **kwargs)\n\ndef make_data_sampler(dataset, shuffle, distributed, num_replicas, rank):\n if distributed:\n return torch.utils.data.distributed.DistributedSampler(dataset,\n shuffle=shuffle, num_replicas=num_replicas, rank=rank\n )\n if shuffle:\n sampler = torch.utils.data.sampler.RandomSampler(dataset)\n else:\n sampler = torch.utils.data.sampler.SequentialSampler(dataset)\n return sampler\n\ndef make_batch_data_sampler(dataset, sampler, batch_size):\n batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, batch_size, drop_last=False)\n return batch_sampler\n\ndef make_dataloader(config, dataset=None, mode='train',\n distributed=False, num_replicas=None, rank=None):\n\n # config variables\n num_gpu = len(config.GPUS) if isinstance(config.GPUS, list) else len(config.GPUS.split(','))\n num_workers = config.NUM_WORKERS_PER_GPU * num_gpu\n num_replicas = 1 if num_replicas is None else num_replicas\n\n if mode == 'train':\n batch_size = int(config.TRAIN.BATCH_SIZE / num_replicas)\n shuffle = config.TRAIN.SHUFFLE\n splits = config.DATASET.TRAIN_SPLIT\n else:\n batch_size = int(config.VAL.BATCH_SIZE / num_replicas)\n shuffle = config.VAL.SHUFFLE\n splits = config.DATASET.VAL_SPLIT\n\n # create a Dataset class object\n if dataset is None:\n dataset = build_dataset(config.DATASET.DATASET_NAME,\n config=config,\n split=mode\n )\n\n sampler = make_data_sampler(dataset, shuffle, distributed, num_replicas, rank)\n batch_sampler = make_batch_data_sampler(dataset, sampler, batch_size)\n\n dataloader = torch.utils.data.DataLoader(dataset=dataset,\n batch_sampler=batch_sampler,\n num_workers=num_workers,\n collate_fn=collate_fn,\n pin_memory=False\n )\n\n return dataloader\n"
] | [
[
"torch.distributed.all_reduce",
"torch.tensor"
],
[
"torch.utils.data.distributed.DistributedSampler",
"torch.utils.data.DataLoader",
"torch.utils.data.sampler.SequentialSampler",
"torch.utils.data.sampler.RandomSampler",
"torch.utils.data.sampler.BatchSampler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
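make_dataloader above reads a fixed set of config attributes; a minimal stub showing which ones, with invented values (the project presumably builds this from its own config files):

from types import SimpleNamespace as NS

config = NS(GPUS='0', NUM_WORKERS_PER_GPU=2,
            TRAIN=NS(BATCH_SIZE=8, SHUFFLE=True),
            VAL=NS(BATCH_SIZE=8, SHUFFLE=False),
            DATASET=NS(DATASET_NAME='kinetics', TRAIN_SPLIT='train', VAL_SPLIT='val'))
# loader = make_dataloader(config, mode='train')   # needs the Kinetics data on disk to run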
iimmortall/QuantLib | [
"29e83dad8738d0fb4efb18d0cb5dd3a7029abd86"
] | [
"losses/loss_factory.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\n\n\ndef cross_entropy_dist_epoch(reduction='mean', **_):\n cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)\n l1_fn = torch.nn.L1Loss(reduction=reduction)\n\n def loss_fn(outputs, outputs_f, labels, epoch, **_):\n loss_dict = dict()\n full_gt_loss = cross_entropy_fn(outputs_f['out'], labels)\n gt_loss = cross_entropy_fn(outputs['out'], labels)\n dist_loss = 0\n layer_names = outputs.keys()\n len_layer = len(layer_names)\n\n for i, layer_name in enumerate(layer_names):\n if i == len_layer - 1:\n continue\n dist_loss += l1_fn(outputs[layer_name], outputs_f[layer_name])\n\n scale = epoch / 100\n if epoch == 100:\n scale = 1\n\n loss_dict['loss'] = scale*(gt_loss + dist_loss) + full_gt_loss\n loss_dict['gt_loss'] = gt_loss\n loss_dict['full_gt_loss'] = full_gt_loss\n return loss_dict\n\n return {'train': loss_fn, 'val': cross_entropy_fn}\n\n\ndef cross_entropy_dist(reduction='mean', **_):\n cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)\n l1_fn = torch.nn.L1Loss(reduction=reduction)\n\n def loss_fn(outputs, outputs_f, labels, **_):\n loss_dict = dict()\n full_gt_loss = cross_entropy_fn(outputs_f['out'], labels)\n gt_loss = cross_entropy_fn(outputs['out'], labels)\n dist_loss = 0\n layer_names = outputs.keys()\n len_layer = len(layer_names)\n\n for i, layer_name in enumerate(layer_names):\n if i == len_layer - 1:\n continue\n dist_loss += l1_fn(outputs[layer_name], outputs_f[layer_name])\n\n loss_dict['loss'] = gt_loss + dist_loss + full_gt_loss\n loss_dict['gt_loss'] = gt_loss\n loss_dict['full_gt_loss'] = full_gt_loss\n return loss_dict\n\n return {'train': loss_fn, 'val': cross_entropy_fn}\n\n\ndef cross_entropy(reduction='mean', **_):\n cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)\n\n def loss_fn(outputs, labels, **_):\n loss_dict = dict()\n gt_loss = cross_entropy_fn(outputs, labels)\n loss_dict['loss'] = gt_loss\n loss_dict['gt_loss'] = gt_loss\n return loss_dict\n\n return {'train': loss_fn, 'val': cross_entropy_fn}\n\n\ndef regularization(reduction='mean', **_):\n cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)\n\n def loss_fn(outputs, labels, reg_factors, **_):\n loss_dict = dict()\n gt_loss = cross_entropy_fn(outputs, labels)\n reg_loss = 0\n for i in range(len(reg_factors)):\n reg_loss += torch.mean((torch.pow(reg_factors[i]-1, 2)*torch.pow(reg_factors[i]+1, 2)))\n reg_loss = reg_loss / len(reg_factors)\n loss_dict['loss'] = gt_loss + reg_loss\n loss_dict['gt_loss'] = gt_loss\n loss_dict['reg_loss'] = reg_loss\n return loss_dict\n\n return {'train': loss_fn, 'val': cross_entropy_fn}\n\n\ndef regularization_temp(reduction='mean', **_):\n cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)\n\n def loss_fn(outputs, labels, reg_factors, **_):\n loss_dict = dict()\n gt_loss = cross_entropy_fn(outputs, labels)\n reg_loss = 0\n for i in range(len(reg_factors)):\n reg_loss += torch.mean((torch.pow(reg_factors[i]-1, 2)*torch.pow(reg_factors[i]+1, 2)))\n reg_loss = reg_loss / len(reg_factors)\n loss_dict['loss'] = gt_loss + reg_loss\n loss_dict['gt_loss'] = gt_loss\n loss_dict['reg_loss'] = reg_loss\n return loss_dict\n\n return {'train': loss_fn, 'val': cross_entropy_fn}\n\n\ndef get_loss(config):\n f = globals().get(config.loss.name)\n return f(**config.loss.params)\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.nn.L1Loss",
"torch.pow"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
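get_loss above resolves a loss builder by name through globals(); a small usage sketch, with SimpleNamespace standing in for the repository's config object:

import torch
from types import SimpleNamespace as NS
from loss_factory import get_loss   # assumes losses/ is on sys.path

config = NS(loss=NS(name='cross_entropy', params={'reduction': 'mean'}))
fns = get_loss(config)                    # {'train': loss_fn, 'val': CrossEntropyLoss}
logits = torch.randn(4, 10)
labels = torch.randint(0, 10, (4,))
out = fns['train'](logits, labels)
print(out['loss'] == out['gt_loss'])      # tensor(True): no distillation or reg terms here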
Payuing/evoDNN | [
"79b727d5062a27d3f8e95f175c509613f52e58aa",
"79b727d5062a27d3f8e95f175c509613f52e58aa",
"79b727d5062a27d3f8e95f175c509613f52e58aa",
"79b727d5062a27d3f8e95f175c509613f52e58aa"
] | [
"legacy/src/EvoNN.py",
"src/dataset/yeast_dataloader.py",
"src/evolver.py",
"src/dataset/abalone_dataloader.py"
] | [
"\"\"\"\r\nA module to implement the evolutionary algorithm for\r\na feedforward neural network.\r\nCrossover and mutation\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import print_function\r\n\r\nimport sys\r\nimport math\r\nimport csv\r\nimport warnings\r\nimport numpy as np\r\nimport random\r\nimport copy\r\nfrom datetime import datetime\r\nwarnings.filterwarnings(\"ignore\") # never print matching warnings\r\nsys.path.append(\"/Users/Payu/Desktop/EvoNN_package/EvoNN_DNN\") #thrid party's libararies, absolute path\r\n\r\n\"\"\"Constant\"\"\"\r\nNUM_LAYERS = 5 # Assertion test number of layers\r\n\r\n\"\"\"Activation function\"\"\"\r\ndef sigmoid(x):\r\n\treturn 1/(1+np.exp(-x))\r\n\r\ndef tanh(x):\r\n\treturn np.tanh(x)\r\n\r\n\"\"\"Loss function\"\"\"\r\ndef RMSE(y_predicted, y_true):\r\n\ty_predicted = y_predicted.reshape((y_predicted.shape[0],))\r\n\treturn np.sqrt(np.mean((y_predicted - y_true)**2))\r\n\r\n\"\"\"Return predicted value array\"\"\"\r\ndef Identity(final_layer_values):\r\n\treturn final_layer_values[:]\r\n\r\nclass Evolver:\r\n\tdef __init__(\tself,\r\n\t\t\t\t\tG=10,\t\t\t\t\t\t\t\t# Maximum iteration\r\n\t\t\t\t\tearly_stopping=10,\t\t\t\t\t# Minimum iteration\r\n\t\t\t\t\tnode_per_layer = [10],\t\t\t\t# Number of nodes per layer\r\n\t\t\t\t\tMU=10,\t\t\t\t\t\t\t\t# Number of parents\r\n\t\t\t\t\tLAMBDA=10,\t\t\t\t\t\t\t# Number of offspring\r\n\t\t\t\t\tP_m=0.1,\t\t\t\t\t\t\t# Weight mutation probability\r\n\t\t\t\t\tP_mf=0.1,\t\t\t\t\t\t\t# Function mutation probablity\r\n\t\t\t\t\tR_m=1.0,\t\t\t\t\t\t\t# Weight mutation radius\r\n\t\t\t\t\tP_c=0.5,\t\t\t\t\t\t\t# Crossover proportion\r\n\t\t\t\t\tP_b=0.01,\t\t\t\t\t\t\t# Bias mutation probablity\r\n\t\t\t\t\tR_b=1.0,\t\t\t\t\t\t\t\t# Bias mutation radius\r\n\t\t\t\t\telitism=True,\t\t\t\t\t\t# Elitism involves copying a small proportion of the fittest candidates, unchanged, into the next generation.\r\n\t\t\t\t\ttournament_size=2,\t\t\t\t\t# Selecting individuals from a population\r\n\t\t\t\t\tfitness_function=RMSE,\r\n\t\t\t\t\tfinal_activation_function=Identity,\r\n\t\t\t\t\tadditional_functions=[],\r\n\t\t\t\t\trandom_state=None,\r\n\t\t\t\t\tverbose=0):\r\n\r\n\t\tself.generation_number = G\r\n\t\tself.early_stopping = early_stopping\r\n\t\tself.node_per_layer = node_per_layer\r\n\t\tself.mu = MU\r\n\t\tself.lam = LAMBDA\r\n\t\tself.P_M = P_m\r\n\t\tself.P_MF = P_mf\r\n\t\tself.P_C = P_c\r\n\t\tself.R_M = R_m\r\n\t\tself.P_B = P_b\r\n\t\tself.R_B = R_b\r\n\t\tself.ELITISM = elitism\r\n\t\tself.TOURNAMENT_SIZE = tournament_size\r\n\t\tself.fitness = fitness_function\r\n\t\tself.final_activation = final_activation_function\r\n\t\tself.functions = {0: sigmoid,\r\n 1: tanh}\t\t\t\t\t\t# Using a dictionary to select function\r\n\t\tif (random_state is not None):\r\n\t\t\ttime_seconds = int(datetime.now().timestamp())\t# Python 3.3+ only\r\n\t\t\tnp.random.seed(random_state + time_seconds)\t\t# add system time to generate random number\r\n\t\t\trandom.seed(random_state + time_seconds)\r\n\t\tself.verbose = verbose\r\n\t\tself.final_population = None\r\n\t\tself.best_individual = None\r\n\r\n\t\tkey = len(self.functions)\t# add additional activation functions\r\n\t\tfor additional_function in additional_functions:\r\n\t\t\tself.functions[key] = additional_function\r\n\t\t\tkey += 1\r\n\t\tprint(\"Network has {} layers, they are {}.\".format(len(self.node_per_layer), self.node_per_layer))\r\n\r\n 
######################################################################################\r\n\t\"\"\"Train the EvoNN\"\"\"\r\n\tdef fit(self, X_train, Y_train, X_val = None, Y_val = None):\r\n\t\t#initialization\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Input is a {} X {} matrix\".format(X_train.shape[0], X_train.shape[1]))\r\n\t\t\tif (X_val is not None):\r\n\t\t\t\tprint(\"Validation is a {} X {} matrix\".format(X_val.shape[0], X_val.shape[1]))\r\n\t\tself.X_train = X_train\r\n\t\tself.Y_train = Y_train\r\n\t\tself.X_val = X_val\r\n\t\tself.Y_val = Y_val\r\n\t\tself.feature_number = X_train.shape[1]\r\n\t\ttry:\r\n\t\t\tself.output_number = Y_train.shape[1]\t# more than one column\r\n\t\texcept IndexError:\r\n\t\t\tself.output_number = 1\r\n\t\toffspring = []\r\n\t\tpopulation = self.initialize_population() # \"mu\" used at start to create a populatiion pool of network\r\n\t\taverage_fitness, average_fitness_validate, best_fitness_train, best_fitness_validate, best_individual = self.evaluate_population(population)\r\n\t\tvalidate_timer = 0\r\n\t\tbest_fitness_validate_of_all_generations = best_fitness_validate # the smaller the better\r\n\t\tbest_individual_validate = best_individual\r\n\r\n\t\tcurr_generation_number = 1\r\n\t\twhile ((curr_generation_number < self.generation_number + 1) and (self.early_stopping > validate_timer)):\r\n\t\t\tif (curr_generation_number % 5 == 0):\r\n\t\t\t\tprint(\"run for {} generations\".format(curr_generation_number))\r\n\r\n\t\t\tif (self.verbose >= 1):\r\n\t\t\t\tprintout_statement = \"Generation \"+str(curr_generation_number)\r\n\t\t\t\tprintout_statement += \"\\tTrain \"\r\n\t\t\t\tprintout_statement += \"\\tbest fitness train: \"+str(best_fitness_train)\r\n\t\t\t\tif (self.X_val is not None):\r\n\t\t\t\t\tprintout_statement += \"\\tValidate \"\r\n\t\t\t\t\tprintout_statement += \"\\tbest fitness: \"+str(best_fitness_validate_of_all_generations)\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Please specify validate set.\")\r\n\t\t\t\t\texit()\r\n\t\t\t\tprint(printout_statement)\r\n\r\n\t\t\toffspring = self.make_offspring(population) # a list of offspring; use lam to generate\r\n\t\t\tfor theOffspring in offspring:\r\n\t\t\t\ttheOffspring.mutate(self.P_M, self.P_MF, self.R_M, self.P_B, self.R_B)\r\n\t\t\tpopulation = [] # generate new population\r\n\r\n\t\t\t# Only one best individual\r\n\t\t\tif (self.ELITISM == True):\r\n\t\t\t\tcopy_individual = EvoNN.copyIndividual(best_individual)\r\n\t\t\t\tpopulation.append(copy_individual)\r\n\t\t\t\tinit_range = 1\r\n\t\t\telse:\r\n\t\t\t\tinit_range = 0\r\n\r\n\t\t\t\"\"\"Generate next parent generation\"\"\"\r\n\t\t\tfor i in range(init_range, self.mu):\r\n\t\t\t\ttheOriginal = self.tournament_selection(offspring, self.TOURNAMENT_SIZE)\r\n\t\t\t\tcopy_individual = EvoNN.copyIndividual(theOriginal)\r\n\t\t\t\tpopulation.append(copy_individual)\r\n\r\n\t\t\taverage_fitness, average_fitness_validate, best_fitness_train, best_fitness_validate, best_individual = self.evaluate_population(population)\r\n\r\n\t\t\tif (self.X_val is not None):\r\n\t\t\t\tif (best_fitness_validate < best_fitness_validate_of_all_generations):\r\n\t\t\t\t\tbest_fitness_validate_of_all_generations = best_fitness_validate\r\n\t\t\t\t\tbest_individual_validate = copy.deepcopy(best_individual)\r\n\t\t\t\t\tvalidate_timer = 0\r\n\t\t\t\telse:\r\n\t\t\t\t\tvalidate_timer += 1 # if no improvement\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Please specify validate set.\")\r\n\t\t\t\texit()\r\n\r\n\t\t\tcurr_generation_number += 
1\r\n\r\n\t\tself.best_individual = copy.deepcopy(best_individual_validate)\r\n\t\tself.final_population = copy.deepcopy(population)\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(self.best_individual)\r\n\r\n\t######################################################################################\r\n\t\"\"\"\"Predict on test dataset\"\"\"\r\n\tdef predict_proba(self, X_test):\r\n\t\treturn self.best_individual.get_output(X_test)\r\n\r\n ######################################################################################\r\n\t\"\"\"Predict on test dataset\"\"\"\r\n\tdef predict(self, X_test):\r\n\t\treturn self.best_individual.get_output(X_test)\r\n\r\n ######################################################################################\r\n\tdef initialize_population(self):\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Initializing population...\")\r\n\r\n\t\tmy_population = []\r\n\t\tfor i in range(self.mu):\r\n\t\t\ttheIndividual = EvoNN.newIndividual(self.feature_number, self.output_number, self.final_activation, hidden_size = self.node_per_layer, function_dictionary = self.functions)\r\n\t\t\tmy_population.append(theIndividual) # theIndividual is a standalone network\r\n\t\t\tif (self.verbose >= 1):\r\n\t\t\t\tprint(\"\\t\\t\\t {}\".format(my_population[i]))\r\n\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Population initialized\")\r\n\t\treturn my_population\r\n\r\n ######################################################################################\r\n\tdef evaluate_population(self, the_population):\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Evaluating population\")\r\n\r\n\t\t\"\"\"\"Initialize parameters\"\"\"\r\n\t\taverage_fitness_train = 0.0 # the whole population\r\n\t\taverage_fitness_validate = 0.0\r\n\r\n\t\tpopulation_count_train = 0\r\n\t\tpopulation_count_validate = 0\r\n\r\n\t\tbest_fitness_train = the_population[0].fitness\r\n\t\tbest_fitness_validate = the_population[0].fitness\r\n\r\n\t\tbest_individual = the_population[0]\r\n\r\n\t\tfor individual in the_population:\r\n\t\t\tY_predict = individual.get_output(self.X_train)\r\n\t\t\tfitness_value_train = self.fitness(Y_predict, self.Y_train) # Y_train is a 2d one-hot coding matrix\r\n\t\t\tindividual.fitness = fitness_value_train\r\n\r\n\t\t\tif not (math.isnan(fitness_value_train)):\r\n\t\t\t\taverage_fitness_train += fitness_value_train\r\n\t\t\t\tpopulation_count_train += 1\r\n\r\n\t\t\t\"\"\"best_fitness_train: the smaller the better\"\"\"\r\n\t\t\tif (fitness_value_train < best_fitness_train):\r\n\t\t\t\tbest_fitness_train = fitness_value_train\r\n\t\t\t\tbest_individual = individual\r\n\r\n\t\t\tif (self.X_val is not None):\r\n\t\t\t\tY_val_predict = individual.get_output(self.X_val)\r\n\t\t\t\tfitness_value_validate = self.fitness(Y_val_predict, self.Y_val)\r\n\t\t\t\taverage_fitness_validate += fitness_value_validate\r\n\t\t\t\tpopulation_count_validate += 1\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Please speficy validate dataset\")\r\n\t\t\t\texit()\r\n\r\n\t\tY_val_predict = best_individual.get_output(self.X_val)\r\n\t\tbest_fitness_validate = self.fitness(Y_val_predict, self.Y_val)\r\n\r\n\t\taverage_fitness_train /= population_count_train\r\n\t\taverage_fitness_validate /= population_count_validate\r\n\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Population evaluated\")\r\n\r\n\t\treturn average_fitness_train, average_fitness_validate, best_fitness_train, best_fitness_validate, best_individual\r\n\r\n ######################################################################################\r\n\tdef 
make_offspring(self, the_population):\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Making offspring\")\r\n\r\n\t\toffspring_population = []\r\n\t\tfor i in range(self.lam):\r\n\t\t\toffspring_population.append(self.create_offspring(the_population))\r\n\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Made offspring\")\r\n\r\n\t\treturn offspring_population\r\n\r\n ######################################################################################\r\n\tdef create_offspring(self, the_population):\r\n\r\n\t\trandom_chance = random.random()\r\n\t\tif (random_chance <= self.P_C): # crossover proportion\r\n\t\t\tparent1 = self.tournament_selection(the_population)\r\n\t\t\tparent2 = self.tournament_selection(the_population)\r\n\t\t\ttheIndividual = EvoNN.crossoverIndividual(parent1, parent2)\r\n\t\t\tassert len(theIndividual.hidden_layer_size) == NUM_LAYERS # test number of layers is correct\r\n\t\t\treturn theIndividual\r\n\t\telse:\r\n\t\t\toriginal = self.tournament_selection(the_population)\r\n\t\t\ttheIndividual = EvoNN.copyIndividual(original)\r\n\t\t\tassert len(theIndividual.hidden_layer_size) == NUM_LAYERS # test number of layers is correct\r\n\t\t\treturn theIndividual\r\n\r\n ######################################################################################\r\n\t\"\"\"\"Tournament selection\"\"\"\r\n\tdef tournament_selection(self, the_population, tournament_size=2):\r\n\r\n\t\tpopulation_size = len(the_population)\r\n\t\tthe_tournament = []\r\n\t\tfor i in range(tournament_size):\r\n\t\t\tthe_tournament.append(the_population[random.randint(0, population_size-1)])\r\n\r\n\t\t\"\"\"Initialization\"\"\"\r\n\t\tbest_fitness = the_tournament[0].fitness\r\n\t\tbest_individual = the_tournament[0]\r\n\t\tfor i in range(1, tournament_size):\r\n\t\t\tif (the_tournament[i].fitness < best_fitness):\r\n\t\t\t\tbest_fitness = the_tournament[i].fitness\r\n\t\t\t\tbest_individual = the_tournament[i]\r\n\r\n\t\treturn best_individual\r\n\r\n##########################################################################################\r\nclass EvoNN:\r\n\r\n\tdefault_function_dictionary = {0: sigmoid,\r\n 1: tanh}\r\n\r\n##########################################################################################\r\n\tdef __init__(self):\r\n\t\tpass\r\n\r\n##########################################################################################\r\n\t\"\"\"\"Generate new standalone feedforward network\"\"\"\r\n\t@classmethod\r\n\tdef newIndividual(cls, input_size, output_size, final_activation_function, hidden_size=[10], function_dictionary = None):\r\n\r\n\t\ttheIndividual = cls()\t\t\t#theIndividual is a class\r\n\t\tif (function_dictionary is None):\r\n\t\t\ttheIndividual.function_dictionary = self.default_function_dictionary\r\n\t\telse:\r\n\t\t\ttheIndividual.function_dictionary = function_dictionary\r\n\t\ttheIndividual.fitness = float('inf')\t\t# initial fitness is inf\r\n\t\ttheIndividual.input_size = input_size\r\n\r\n\t\ttheIndividual.hidden_layer_size = hidden_size # number of layers, a list\r\n\t\tnum_hidden_layers = len(theIndividual.hidden_layer_size)\r\n\r\n\t\ttheIndividual.hidden_layer_bias = [] # a list of numpy 1d array\r\n\t\ttheIndividual.hidden_layer_functions = [] # a list of numpy 1d array\r\n\t\tfor node_size in hidden_size: # hidden_size is a list\r\n\t\t\ttheIndividual.hidden_layer_bias.append(np.random.uniform(size=(node_size)))\r\n\t\t\ttheIndividual.hidden_layer_functions.append(np.random.randint( len(theIndividual.function_dictionary.keys()), size=node_size 
))\r\n\r\n\t\ttheIndividual.output_size = output_size\r\n\t\ttheIndividual.final_activation = final_activation_function # softmax, probability function\r\n\r\n\t\ttheIndividual.input_to_hidden_matrix = np.random.uniform(size=(\tinput_size, hidden_size[0]))\r\n\t\tif (num_hidden_layers > 1):\r\n\t\t\ttheIndividual.hidden_to_hidden_matrix = []\r\n\t\t\tfor curr_layer in range(num_hidden_layers - 1):\r\n\t\t\t\ttheIndividual.hidden_to_hidden_matrix.append(np.random.uniform(size=(hidden_size[curr_layer], hidden_size[curr_layer + 1])))\r\n\t\ttheIndividual.hidden_to_output_matrix = np.random.uniform(size=( hidden_size[-1], output_size))\r\n\r\n\t\treturn theIndividual\r\n\r\n##########################################################################################\r\n\t@classmethod\r\n\tdef crossoverIndividual(cls, individual1, individual2):\r\n\r\n\t\ttheIndividual = cls() # the offspring individual\r\n\t\ttheIndividual.function_dictionary = individual1.function_dictionary\r\n\r\n\t\tinput_size = individual1.input_to_hidden_matrix.shape[0]\r\n\t\toutput_size = individual1.hidden_to_output_matrix.shape[1]\r\n\r\n\t\ttheIndividual.fitness = float('inf')\r\n\t\ttheIndividual.input_size = input_size\r\n\r\n\t\thidden_size = individual1.hidden_layer_size # a list array\r\n\t\tnum_hidden_layers = len(hidden_size)\r\n\r\n\t\t# generate offspring arch\r\n\t\ttheIndividual.hidden_layer_size = copy.deepcopy(hidden_size)\r\n\t\ttheIndividual.hidden_layer_bias = []\r\n\t\ttheIndividual.hidden_layer_functions = []\r\n\t\tfor node_size in hidden_size:\r\n\t\t\ttheIndividual.hidden_layer_bias.append(np.zeros(node_size))\r\n\t\t\ttheIndividual.hidden_layer_functions.append(np.zeros(node_size))\r\n\r\n\t\ttheIndividual.output_size = output_size\r\n\t\ttheIndividual.final_activation = individual1.final_activation\r\n\r\n\t\t\"\"\"crossover activation function and bias\"\"\"\r\n\t\tfor layer in range(num_hidden_layers):\r\n\t\t\t# crossover activation function\r\n\t\t\tprobablity_matrix = np.random.uniform(size=(hidden_size[layer]))\r\n\t\t\ttheIndividual.hidden_layer_functions[layer][probablity_matrix <= 0.5] = individual1.hidden_layer_functions[layer][probablity_matrix <= 0.5]\r\n\t\t\ttheIndividual.hidden_layer_functions[layer][probablity_matrix > 0.5] = individual2.hidden_layer_functions[layer][probablity_matrix > 0.5]\r\n\r\n\t\t\t# crossover bias\r\n\t\t\tprobablity_matrix = np.random.uniform(size=(hidden_size[layer]))\r\n\t\t\ttheIndividual.hidden_layer_bias[layer][probablity_matrix <= 0.5] = individual1.hidden_layer_bias[layer][probablity_matrix <= 0.5]\r\n\t\t\ttheIndividual.hidden_layer_bias[layer][probablity_matrix > 0.5] = individual2.hidden_layer_bias[layer][probablity_matrix > 0.5]\r\n\r\n\t\t\"\"\"crossover weight matrix\"\"\"\r\n\t\t# input to hidden matrix\r\n\t\ttheIndividual.input_to_hidden_matrix = np.zeros((input_size, hidden_size[0]))\r\n\t\tprobablity_matrix = np.random.uniform(size=(input_size, hidden_size[0]))\r\n\r\n\t\ttheIndividual.input_to_hidden_matrix[probablity_matrix <= 0.5] = individual1.input_to_hidden_matrix[probablity_matrix <= 0.5]\r\n\t\ttheIndividual.input_to_hidden_matrix[probablity_matrix > 0.5] = individual2.input_to_hidden_matrix[probablity_matrix > 0.5]\r\n\r\n\t\t# hidden to hidden matrix\r\n\t\tif (num_hidden_layers > 1):\r\n\t\t\ttheIndividual.hidden_to_hidden_matrix = []\r\n\t\t\tfor curr_layer in range(num_hidden_layers - 1):\r\n\t\t\t\tnew_hidden_to_hidden_matrix = np.zeros((hidden_size[curr_layer], hidden_size[curr_layer + 1]))\r\n\t\t\t\tprobablity_matrix = 
np.random.uniform(size=(hidden_size[curr_layer], hidden_size[curr_layer + 1]))\r\n\r\n\t\t\t\tnew_hidden_to_hidden_matrix[probablity_matrix <= 0.5] = individual1.hidden_to_hidden_matrix[curr_layer][probablity_matrix <= 0.5]\r\n\t\t\t\tnew_hidden_to_hidden_matrix[probablity_matrix > 0.5] = individual2.hidden_to_hidden_matrix[curr_layer][probablity_matrix > 0.5]\r\n\r\n\t\t\t\ttheIndividual.hidden_to_hidden_matrix.append(new_hidden_to_hidden_matrix)\r\n\r\n\t\t# hidden to output matrix\r\n\t\ttheIndividual.hidden_to_output_matrix = np.zeros((hidden_size[-1], output_size))\r\n\t\tprobablity_matrix = np.random.uniform(size=((hidden_size[-1], output_size)))\r\n\r\n\t\ttheIndividual.hidden_to_output_matrix[probablity_matrix <= 0.5] = individual1.hidden_to_output_matrix[probablity_matrix <= 0.5]\r\n\t\ttheIndividual.hidden_to_output_matrix[probablity_matrix > 0.5] = individual2.hidden_to_output_matrix[probablity_matrix > 0.5]\r\n\r\n\t\treturn theIndividual\r\n\r\n##########################################################################################\r\n\t\"\"\"\"Deep copy individual\"\"\"\r\n\t@classmethod\r\n\tdef copyIndividual(cls, theOriginal):\r\n\r\n\t\ttheIndividual = cls()\r\n\t\ttheIndividual.function_dictionary = theOriginal.function_dictionary\r\n\r\n\t\tinput_size = theOriginal.input_to_hidden_matrix.shape[0]\r\n\t\toutput_size = theOriginal.hidden_to_output_matrix.shape[1]\r\n\r\n\t\ttheIndividual.fitness = float('inf')\r\n\t\ttheIndividual.input_size = input_size\r\n\t\ttheIndividual.hidden_layer_size = copy.deepcopy(theOriginal.hidden_layer_size)\r\n\r\n\t\t# deep copy bias and activation function\r\n\t\ttheIndividual.hidden_layer_bias = copy.deepcopy(theOriginal.hidden_layer_bias)\r\n\t\ttheIndividual.hidden_layer_functions = copy.deepcopy(theOriginal.hidden_layer_functions)\r\n\r\n\t\ttheIndividual.output_size = output_size\r\n\t\ttheIndividual.final_activation = theOriginal.final_activation\r\n\r\n\t\t# deep copy weight matrix\r\n\t\ttheIndividual.input_to_hidden_matrix = copy.deepcopy(theOriginal.input_to_hidden_matrix)\r\n\t\tif (len(theIndividual.hidden_layer_size) > 1):\r\n\t\t\ttheIndividual.hidden_to_hidden_matrix = copy.deepcopy(theOriginal.hidden_to_hidden_matrix)\r\n\t\ttheIndividual.hidden_to_output_matrix = copy.deepcopy(theOriginal.hidden_to_output_matrix)\r\n\r\n\t\treturn theIndividual\r\n\r\n##########################################################################################\r\n\tdef mutate_matrix(self, the_matrix, probablity, radius):\r\n\r\n\t\tprobablity_matrix = np.random.uniform(size=(the_matrix.shape))\r\n\t\tmutation_matrix = np.random.uniform(low = -radius, high=radius, size=(the_matrix.shape))\r\n\t\tthe_matrix[probablity_matrix <= probablity] += mutation_matrix[probablity_matrix <= probablity]\r\n\r\n\t\treturn the_matrix\r\n\r\n##########################################################################################\r\n\tdef mutate(self, P_m, P_mf, R_m, P_b, R_b):\r\n\r\n\t\tinput_size = self.input_size\r\n\t\thidden_size= self.hidden_layer_size # a list\r\n\t\tnum_hidden_layers = len(self.hidden_layer_size)\r\n\t\toutput_size = self.hidden_to_output_matrix.shape[1]\r\n\r\n\t\t\"\"\"\"Mutate input to hidden matrix\"\"\"\r\n\t\tself.input_to_hidden_matrix = self.mutate_matrix(self.input_to_hidden_matrix, P_m, R_m)\r\n\r\n\t\t\"\"\"\"Mutate activation function and bias\"\"\"\r\n\t\tfunction_number = len(self.function_dictionary.keys())\r\n\r\n\t\tfor layer in range(num_hidden_layers):\r\n\t\t\t# mutate activation 
function\r\n\t\t\tprobablity_matrix = np.random.uniform(size=(hidden_size[layer]))\r\n\t\t\tfunction_mutation_matrix = np.random.randint(0, function_number - 1,size=(hidden_size[layer]))\r\n\t\t\tself.hidden_layer_functions[layer][probablity_matrix <= P_mf] = function_mutation_matrix[probablity_matrix <= P_mf]\r\n\r\n\t\t\t# mutate bias\r\n\t\t\tself.hidden_layer_bias[layer] = self.mutate_matrix(self.hidden_layer_bias[layer], P_b, R_b)\r\n\r\n\t\t\"\"\"Mutate hidden to hidden matrix\"\"\"\r\n\t\tif (num_hidden_layers > 1):\r\n\t\t\tfor layer in range(num_hidden_layers - 1):\r\n\t\t\t\tself.hidden_to_hidden_matrix[layer] = self.mutate_matrix(self.hidden_to_hidden_matrix[layer], P_m, R_m)\r\n\r\n\t\t\"\"\"Mutate hidden to output matrix\"\"\"\r\n\t\tself.hidden_to_output_matrix = self.mutate_matrix(self.hidden_to_output_matrix, P_m, R_m)\r\n\r\n##########################################################################################\r\n\t\"\"\"Output is a 2d (sample_size, classification_number) array\"\"\"\r\n\tdef get_output(self, X_train):\r\n\r\n\t\tsample_size = X_train.shape[0]\r\n\t\thidden_layer_input = np.dot(X_train, self.input_to_hidden_matrix) + np.tile(self.hidden_layer_bias[0], (sample_size, 1)) # y = wx+b\r\n\r\n\t\tfor i in range(hidden_layer_input.shape[1]): # z = f(wx+b)\r\n\t\t\tfunctionIndex = self.hidden_layer_functions[0][i]\r\n\t\t\tmyFunction = self.function_dictionary[functionIndex]\r\n\t\t\thidden_layer_input[:, i] = myFunction(hidden_layer_input[:, i])\r\n\r\n\t\thidden_layer_matrix = np.copy(hidden_layer_input) # deep copy\r\n\t\tif (len(self.hidden_layer_size) > 1):\r\n\t\t\tfor i in range(len(self.hidden_layer_size) - 1): # aw+b\r\n\t\t\t\thidden_layer_matrix = np.dot(hidden_layer_matrix, self.hidden_to_hidden_matrix[i]) + np.tile(self.hidden_layer_bias[i+1],(sample_size, 1)) # y = wx+b\r\n\r\n\t\t\t\tfor j in range(hidden_layer_matrix.shape[1]): # z = f(wx+b)\r\n\t\t\t\t\tfunctionIndex = self.hidden_layer_functions[i+1][j]\r\n\t\t\t\t\tmyFunction = self.function_dictionary[functionIndex]\r\n\t\t\t\t\thidden_layer_matrix[:, j] = myFunction(hidden_layer_matrix[:, j])\r\n\r\n\t\toutput_layer_input = np.dot(hidden_layer_matrix, self.hidden_to_output_matrix)\r\n\r\n\t\toutput = self.final_activation(output_layer_input)\r\n\r\n\t\treturn output\r\n",
"\"\"\"\nLoad the yeast dataset from UCI ML repository\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom dataset_peek import data_peek\n\n\ndef load_yeast():\n fp = \"yeast.data.txt\"\n raw_data = pd.read_csv(fp, delim_whitespace=True, header=None)\n x = np.array(raw_data.iloc[:, 1:5])\n x1 = np.array(raw_data.iloc[:, 7:9])\n x = np.c_[x, x1]\n y = np.array(raw_data.iloc[:, 9])\n y = LabelEncoder().fit_transform(y)\n return x, y\n\n\nif __name__ == '__main__':\n data_peek(\"Yeast\", load_yeast)\n",
"import time\nimport random\n\nimport numpy as np\nimport activation_funciton as ac\nimport loss_function\nimport torch.nn as nn\nimport torch\nfrom typing import Callable\n\n\nclass Evolver:\n def __init__(self,\n max_iter: int = 10,\n early_stopping: int = 10,\n layers: np.ndarray = np.array([10], dtype = np.uint32),\n parent_size: int = 10,\n children_size: int = 10,\n weight_mutation_probability: float = 0.1,\n function_mutation_probability: float = 0.1,\n weight_mutation_radius: float = 1.0,\n crossover_proportion: float = 0.5,\n bias_mutation_probability: float = 0.01,\n bias_mutation_radius: float = 1.0,\n is_elitism: bool = True,\n tournament_size: int = 2,\n fitness_function: Callable = loss_function.rmse,\n output_layer_activation_function: Callable = ac.softmax,\n candidate_function_dict: dict = {0: ac.sigmoid},\n is_random_state: bool = False,\n verbose: bool = False):\n self.max_iter = max_iter\n self.early_stopping = early_stopping\n self.layers = layers\n self.parent_size = parent_size\n self.children_size = children_size\n self.weight_mutation_probability = weight_mutation_probability\n self.function_mutation_probability = function_mutation_probability\n self.weight_mutation_radius = weight_mutation_radius\n self.crossover_proportion = crossover_proportion\n self.bias_mutation_probability = bias_mutation_probability\n self.bias_mutation_radius = bias_mutation_radius\n self.is_elitism = is_elitism\n self.tournament_size = tournament_size\n self.fitness_function = fitness_function\n self.output_layer_activation_function = output_layer_activation_function\n self.candidate_function_dict = candidate_function_dict\n self.is_random_state = is_random_state\n self.verbose = verbose\n\n if self.is_random_state:\n seed = int(time.time())\n np.random.seed(seed)\n random.seed(seed)\n torch.random.manual_seed(seed)\n\n print(f\"Network has {len(self.layers)} layers. The topology is {self.layers}\")\n\n def fit(self, x_train: np.ndarray, y_train: np.ndarray,\n x_validate: np.ndarray = None, y_validate: np.ndarray = None):\n if self.verbose:\n print(f\"Input shape is {x_train.shape[0]} X {x_train.shape[1]}.\")\n if x_validate:\n print(f\"Validation shape is {x_validate.shape[0]} X {x_validate.shape[0]}\")\n\n",
"import numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom dataset_peek import data_peek\n\n\ndef load_abalone():\n fp = \"abalone.csv\"\n raw_data = pd.read_csv(fp, delimiter=',', header=None)\n x = np.array(raw_data.iloc[:, 0])\n x2 = np.array(raw_data.iloc[:, 1:8])\n x = LabelEncoder().fit_transform(x)\n x = np.c_[x, x2]\n y = np.array(raw_data.iloc[:, 8])\n return x, y\n\n\nif __name__ == '__main__':\n data_peek(\"Abalone\", load_abalone)\n"
] | [
[
"numpy.dot",
"numpy.random.seed",
"numpy.tile",
"numpy.copy",
"numpy.mean",
"numpy.exp",
"numpy.random.uniform",
"numpy.tanh",
"numpy.zeros",
"numpy.random.randint"
],
[
"sklearn.preprocessing.LabelEncoder",
"numpy.array",
"pandas.read_csv"
],
[
"numpy.array",
"numpy.random.seed",
"torch.random.manual_seed"
],
[
"sklearn.preprocessing.LabelEncoder",
"numpy.array",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jmarine/ezeeai | [
"091b4ce3bc5794c534084bff3301b15ba8a9be1a"
] | [
"ezeeai/core/explainer.py"
] | [
"from lime import lime_tabular, lime_image\nfrom scipy.misc import imresize\nimport numpy as np\nimport tensorflow as tf\n\n\nclass TabularExplainer:\n\n def __init__(self, dataset, verbose=True):\n\n train_dataset, training_labels = dataset.make_numpy_array(dataset.get_train_file())\n\n mode = dataset.get_mode()\n categorical_features, categorical_index, categorical_names = dataset.get_categorical_features()\n unique = dataset.get_target_labels()\n\n self._mode = mode\n self.dataset = dataset\n\n self._explainer = lime_tabular.LimeTabularExplainer(train_dataset,\n feature_names=dataset.get_feature_names(),\n class_names=unique,\n categorical_features=categorical_index,\n categorical_names=categorical_names,\n training_labels=training_labels,\n verbose=verbose,\n mode=self._mode)\n\n def explain_instance(self, model, features, num_features=5, top_labels=3, sel_target=None):\n\n sample = self.dataset.create_feat_array(features)\n features = {k: features[k] for k in self.dataset.get_feature_names()}\n\n def predict_fn(x):\n x = x.reshape(-1, len(features))\n\n local_features = {k: x[:, i] for i, k in enumerate(features.keys())}\n local_features = self.dataset.from_array(local_features)\n\n predict_input_fn = tf.estimator.inputs.numpy_input_fn(x=local_features,\n y=None, num_epochs=1, shuffle=False)\n with tf.device('/cpu:0'): # TODO maybe check if gpu is free\n predictions = list(model.predict(input_fn=predict_input_fn))\n\n if self._mode == 'classification':\n return np.array([x['probabilities'] for x in predictions])\n\n if sel_target:\n tidx = self.dataset.get_targets().index(sel_target)\n return np.array([x['predictions'][tidx] for x in predictions]).reshape(-1)\n\n return np.array([x['predictions'] for x in predictions]).reshape(-1)\n\n if self._mode == 'classification':\n return self._explainer.explain_instance(sample, predict_fn, num_features=num_features,\n top_labels=top_labels)\n\n return self._explainer.explain_instance(sample, predict_fn, num_features=num_features)\n\n\nclass ImageExplainer:\n\n def __init__(self, dataset, verbose=True):\n self._dataset = dataset\n self._explainer = lime_image.LimeImageExplainer(verbose=verbose)\n\n def explain_instance(self, model, features, num_features=5, top_labels=3, sel_target=None):\n def predict_fn(x):\n x = x.astype(np.float32)\n x = np.apply_along_axis(self._dataset.normalize, 0, x)\n predict_input_fn = tf.estimator.inputs.numpy_input_fn(x=x, y=None, num_epochs=1, shuffle=False)\n with tf.device('/cpu:0'): # TODO maybe check if gpu is free\n probabilities = list(model.predict(input_fn=predict_input_fn))\n return np.array([x['probabilities'] for x in probabilities])\n\n features = imresize(features, self._dataset.get_image_size(), interp='bilinear')\n\n explain_result = self._explainer.explain_instance(features, predict_fn, batch_size=100,\n num_features=num_features,\n labels=self._dataset.get_class_names(),\n top_labels=len(self._dataset.get_class_names()))\n\n features = features.astype(np.float32)\n\n features = self._dataset.normalize(features)\n\n predict_input_fn = tf.estimator.inputs.numpy_input_fn(x=features[np.newaxis, ...], y=None, num_epochs=1,\n shuffle=False)\n with tf.device('/cpu:0'): # TODO maybe check if gpu is free\n predictions = list(model.predict(input_fn=predict_input_fn))\n\n return explain_result, predictions[0]['probabilities']\n"
] | [
[
"numpy.array",
"tensorflow.estimator.inputs.numpy_input_fn",
"numpy.apply_along_axis",
"tensorflow.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
murthyn/composer | [
"2a04cf387dd8558556500f7ef2bc6d3d131043d5",
"2a04cf387dd8558556500f7ef2bc6d3d131043d5"
] | [
"composer/models/resnets.py",
"composer/datasets/coco.py"
] | [
"# Copyright 2021 MosaicML. All Rights Reserved.\n\n\"\"\"The CIFAR ResNet torch module.\n\nSee the :doc:`Model Card </model_cards/resnet>` for more details.\n\"\"\"\n\n# Code below adapted from https://github.com/facebookresearch/open_lth\n# and https://github.com/pytorch/vision\n\nfrom typing import List, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom composer.models import Initializer\n\n__all__ = [\"CIFAR_ResNet\"]\n\n\nclass CIFAR_ResNet(nn.Module):\n \"\"\"A residual neural network as originally designed for CIFAR-10.\"\"\"\n\n class Block(nn.Module):\n \"\"\"A ResNet block.\"\"\"\n\n def __init__(self, f_in: int, f_out: int, downsample: bool = False):\n super(CIFAR_ResNet.Block, self).__init__()\n\n stride = 2 if downsample else 1\n self.conv1 = nn.Conv2d(f_in, f_out, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(f_out)\n self.conv2 = nn.Conv2d(f_out, f_out, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(f_out)\n self.relu = nn.ReLU(inplace=True)\n\n # No parameters for shortcut connections.\n if downsample or f_in != f_out:\n self.shortcut = nn.Sequential(\n nn.Conv2d(f_in, f_out, kernel_size=1, stride=2, bias=False),\n nn.BatchNorm2d(f_out),\n )\n else:\n self.shortcut = nn.Sequential()\n\n def forward(self, x: torch.Tensor):\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n return self.relu(out)\n\n def __init__(self, plan: List[Tuple[int, int]], initializers: List[Initializer], outputs: int = 10):\n super(CIFAR_ResNet, self).__init__()\n outputs = outputs or 10\n\n self.num_classes = outputs\n\n # Initial convolution.\n current_filters = plan[0][0]\n self.conv = nn.Conv2d(3, current_filters, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn = nn.BatchNorm2d(current_filters)\n self.relu = nn.ReLU(inplace=True)\n\n # The subsequent blocks of the ResNet.\n blocks = []\n for segment_index, (filters, num_blocks) in enumerate(plan):\n for block_index in range(num_blocks):\n downsample = segment_index > 0 and block_index == 0\n blocks.append(CIFAR_ResNet.Block(current_filters, filters, downsample))\n current_filters = filters\n\n self.blocks = nn.Sequential(*blocks)\n\n # Final fc layer. Size = number of filters in last segment.\n self.fc = nn.Linear(plan[-1][0], outputs)\n self.criterion = nn.CrossEntropyLoss()\n\n for initializer in initializers:\n initializer = Initializer(initializer)\n self.apply(initializer.get_initializer())\n\n def forward(self, x: torch.Tensor):\n out = self.relu(self.bn(self.conv(x)))\n out = self.blocks(out)\n out = F.avg_pool2d(out, out.size()[3])\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n return out\n\n @staticmethod\n def is_valid_model_name(model_name: str):\n valid_model_names = [f\"cifar_resnet_{layers}\" for layers in (20, 56)]\n return (model_name in valid_model_names)\n\n @staticmethod\n def get_model_from_name(model_name: str, initializers: List[Initializer], outputs: int = 10):\n \"\"\"The naming scheme for a ResNet is ``'cifar_resnet_D[_W]'``.\n\n D is the model depth (e.g. 
``'cifar_resnet56'``)\n \"\"\"\n\n if not CIFAR_ResNet.is_valid_model_name(model_name):\n raise ValueError('Invalid model name: {}'.format(model_name))\n\n depth = int(model_name.split('_')[2])\n if len(model_name.split('_')) == 3:\n width = 16\n else:\n width = int(model_name.split('_')[4])\n\n if (depth - 2) % 3 != 0:\n raise ValueError('Invalid CIFAR_ResNet depth: {}'.format(depth))\n num_blocks = (depth - 2) // 6\n\n model_arch = {\n 56: [(width, num_blocks), (2 * width, num_blocks), (4 * width, num_blocks)],\n 20: [(width, num_blocks), (2 * width, num_blocks), (4 * width, num_blocks)],\n }\n\n return CIFAR_ResNet(model_arch[depth], initializers, outputs)\n",
"# Copyright 2021 MosaicML. All Rights Reserved.\n\n\"\"\"COCO (Common Objects in Context) dataset.\n\nCOCO is a large-scale object detection, segmentation, and captioning dataset. Please refer to the `COCO dataset\n<https://cocodataset.org>`_ for more details.\n\"\"\"\nimport json\nimport os\nfrom dataclasses import dataclass\nfrom typing import Sequence\n\nimport torch\nfrom PIL import Image\n\n\ndef _isArrayLike(obj):\n return hasattr(obj, '__iter__') and hasattr(obj, '__len__')\n\n\nfrom composer.core import DataSpec\nfrom composer.core.types import Batch\nfrom composer.datasets.dataloader import DataLoaderHparams\nfrom composer.datasets.hparams import DatasetHparams\nfrom composer.models.ssd.utils import SSDTransformer, dboxes300_coco\nfrom composer.utils import dist\n\n__all__ = [\"COCODatasetHparams\", \"COCODetection\"]\n\n\n@dataclass\nclass COCODatasetHparams(DatasetHparams):\n \"\"\"Defines an instance of the COCO Dataset.\"\"\"\n\n def initialize_object(self, batch_size: int, dataloader_hparams: DataLoaderHparams):\n\n if self.datadir is None:\n raise ValueError(\"datadir is required.\")\n\n dboxes = dboxes300_coco()\n\n input_size = 300\n train_trans = SSDTransformer(dboxes, (input_size, input_size), val=False, num_cropping_iterations=1)\n val_trans = SSDTransformer(dboxes, (input_size, input_size), val=True)\n data = self.datadir\n\n val_annotate = os.path.join(data, \"annotations/instances_val2017.json\")\n val_coco_root = os.path.join(data, \"val2017\")\n\n train_annotate = os.path.join(data, \"annotations/instances_train2017.json\")\n train_coco_root = os.path.join(data, \"train2017\")\n\n train_coco = COCODetection(train_coco_root, train_annotate, train_trans)\n val_coco = COCODetection(val_coco_root, val_annotate, val_trans)\n\n if self.is_train:\n return DataSpec(dataloader=dataloader_hparams.initialize_object(\n dataset=train_coco,\n batch_size=batch_size,\n sampler=dist.get_sampler(train_coco, drop_last=self.drop_last, shuffle=self.shuffle),\n drop_last=self.drop_last,\n ),\n split_batch=split_dict_fn)\n else:\n return DataSpec(dataloader=dataloader_hparams.initialize_object(\n dataset=val_coco,\n drop_last=self.drop_last,\n batch_size=batch_size,\n sampler=None,\n ),\n split_batch=split_dict_fn)\n\n\nimport torch.utils.data as data\n\n\nclass COCODetection(data.Dataset):\n \"\"\"PyTorch Dataset for the COCO dataset.\n\n Args:\n img_folder (str): the path to the COCO folder.\n annotate_file (str): path to a file that contains image id, annotations (e.g., bounding boxes and object\n classes) etc.\n transform (torch.nn.Module): transformations to apply to the image.\n \"\"\"\n\n def __init__(self, img_folder, annotate_file, transform=None):\n self.img_folder = img_folder\n self.annotate_file = annotate_file\n\n # Start processing annotation\n with open(annotate_file) as fin:\n self.data = json.load(fin)\n\n self.images = {}\n\n self.label_map = {}\n self.label_info = {}\n # 0 stands for the background\n cnt = 0\n self.label_info[cnt] = \"background\"\n for cat in self.data[\"categories\"]:\n cnt += 1\n self.label_map[cat[\"id\"]] = cnt\n self.label_info[cnt] = cat[\"name\"]\n\n # build inference for images\n for img in self.data[\"images\"]:\n img_id = img[\"id\"]\n img_name = img[\"file_name\"]\n img_size = (img[\"height\"], img[\"width\"])\n if img_id in self.images:\n raise Exception(\"dulpicated image record\")\n self.images[img_id] = (img_name, img_size, [])\n\n # read bboxes\n for bboxes in self.data[\"annotations\"]:\n img_id = bboxes[\"image_id\"]\n bbox = 
bboxes[\"bbox\"]\n bbox_label = self.label_map[bboxes[\"category_id\"]]\n self.images[img_id][2].append((bbox, bbox_label))\n\n for k, v in list(self.images.items()):\n if len(v[2]) == 0:\n self.images.pop(k)\n\n self.img_keys = list(self.images.keys())\n self.transform = transform\n\n #@property\n def labelnum(self):\n return len(self.label_info)\n\n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, idx):\n img_id = self.img_keys[idx]\n img_data = self.images[img_id]\n fn = img_data[0]\n img_path = os.path.join(self.img_folder, fn)\n\n img = Image.open(img_path).convert(\"RGB\")\n\n htot, wtot = img_data[1]\n bbox_sizes = []\n bbox_labels = []\n\n for (l, t, w, h), bbox_label in img_data[2]:\n r = l + w\n b = t + h\n bbox_size = (l / wtot, t / htot, r / wtot, b / htot)\n bbox_sizes.append(bbox_size)\n bbox_labels.append(bbox_label)\n\n bbox_sizes = torch.tensor(bbox_sizes)\n bbox_labels = torch.tensor(bbox_labels)\n\n if self.transform != None:\n img, (htot, wtot), bbox_sizes, bbox_labels = \\\n self.transform(img, (htot, wtot), bbox_sizes, bbox_labels)\n\n return img, img_id, (htot, wtot), bbox_sizes, bbox_labels\n\n\ndef split_dict_fn(batch: Batch, num_microbatches: int) -> Sequence[Batch]: #type: ignore\n if not isinstance(batch, Sequence):\n raise ValueError(f'split_fn requires batch be a tuple of tensors, got {type(batch)}')\n img, img_id, img_size, bbox_sizes, bbox_labels = batch #type: ignore\n nm = num_microbatches\n if isinstance(img, torch.Tensor) and isinstance(img_id, torch.Tensor):\n return list(\n zip(img.chunk(nm), img_id.chunk(nm), (img_size[i:i + nm] for i in range(0, len(img_size), nm)),\n bbox_sizes.chunk(nm), bbox_labels.chunk(nm))) #type: ignore\n if isinstance(img, list) and isinstance(img_id, list) and isinstance(img_size, list) and isinstance(\n bbox_sizes, list) and isinstance(bbox_labels, list):\n return list(\n zip(\n [img[i::nm] for i in range(nm)],\n [img_id[i::nm] for i in range(nm)],\n [img_size[i::nm] for i in range(nm)],\n [bbox_sizes[i::nm] for i in range(nm)],\n [bbox_labels[i::nm] for i in range(nm)],\n )) #type: ignore\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.CrossEntropyLoss",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
evgeniya-egupova/openvino | [
"a9a583eb42d43322b39b95b164b5b22c4f341111",
"a9a583eb42d43322b39b95b164b5b22c4f341111",
"a9a583eb42d43322b39b95b164b5b22c4f341111"
] | [
"src/bindings/python/tests/test_ngraph/test_core.py",
"model-optimizer/extensions/front/tf/sparse_to_dense_replacer.py",
"tools/pot/openvino/tools/pot/graph/passes.py"
] | [
"# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nimport openvino.runtime.opset8 as ov\nfrom openvino.runtime.impl import Dimension, Function, PartialShape, Shape\n\n\ndef test_dimension():\n dim = Dimension()\n assert dim.is_dynamic\n assert not dim.is_static\n assert repr(dim) == \"<Dimension: ?>\"\n\n dim = Dimension.dynamic()\n assert dim.is_dynamic\n assert not dim.is_static\n assert repr(dim) == \"<Dimension: ?>\"\n\n dim = Dimension(10)\n assert dim.is_static\n assert len(dim) == 10\n assert dim.get_length() == 10\n assert dim.get_min_length() == 10\n assert dim.get_max_length() == 10\n assert repr(dim) == \"<Dimension: 10>\"\n\n dim = Dimension(5, 15)\n assert dim.is_dynamic\n assert dim.get_min_length() == 5\n assert dim.get_max_length() == 15\n assert repr(dim) == \"<Dimension: [5, 15]>\"\n\n\ndef test_dimension_comparisons():\n d1 = Dimension.dynamic()\n d2 = Dimension.dynamic()\n assert d1 == d2\n assert d1 == -1\n assert d1.refines(d2)\n assert d1.relaxes(d2)\n assert d2.refines(d1)\n assert d2.relaxes(d1)\n assert d2.compatible(d1)\n assert d2.same_scheme(d1)\n\n d1 = Dimension.dynamic()\n d2 = Dimension(3)\n assert d1 != d2\n assert d2 == 3\n assert not d1.refines(d2)\n assert d1.relaxes(d2)\n assert d2.refines(d1)\n assert not d2.relaxes(d1)\n assert d2.compatible(d1)\n assert not d2.same_scheme(d1)\n\n d1 = Dimension(3)\n d2 = Dimension(3)\n assert d1 == d2\n assert d1.refines(d2)\n assert d1.relaxes(d2)\n assert d2.refines(d1)\n assert d2.relaxes(d1)\n assert d2.compatible(d1)\n assert d2.same_scheme(d1)\n\n d1 = Dimension(4)\n d2 = Dimension(3)\n assert d1 != d2\n assert not d1.refines(d2)\n assert not d1.relaxes(d2)\n assert not d2.refines(d1)\n assert not d2.relaxes(d1)\n assert not d2.compatible(d1)\n assert not d2.same_scheme(d1)\n\n\ndef test_partial_shape():\n ps = PartialShape([1, 2, 3, 4])\n assert ps.is_static\n assert not ps.is_dynamic\n assert ps.rank == 4\n assert repr(ps) == \"<PartialShape: {1,2,3,4}>\"\n assert ps.get_dimension(0) == Dimension(1)\n assert ps.get_dimension(1) == Dimension(2)\n assert ps.get_dimension(2) == Dimension(3)\n assert ps.get_dimension(3) == Dimension(4)\n\n shape = Shape([1, 2, 3])\n ps = PartialShape(shape)\n assert ps.is_static\n assert not ps.is_dynamic\n assert ps.all_non_negative\n assert ps.rank == 3\n assert list(ps.get_shape()) == [1, 2, 3]\n assert list(ps.get_max_shape()) == [1, 2, 3]\n assert list(ps.get_min_shape()) == [1, 2, 3]\n assert list(ps.to_shape()) == [1, 2, 3]\n assert repr(shape) == \"<Shape: {1, 2, 3}>\"\n assert repr(ps) == \"<PartialShape: {1,2,3}>\"\n\n ps = PartialShape([Dimension(1), Dimension(2), Dimension(3), Dimension.dynamic()])\n assert not ps.is_static\n assert ps.is_dynamic\n assert ps.all_non_negative\n assert ps.rank == 4\n assert list(ps.get_min_shape()) == [1, 2, 3, 0]\n assert list(ps.get_max_shape())[3] > 1000000000\n assert repr(ps) == \"<PartialShape: {1,2,3,?}>\"\n assert ps.get_dimension(0) == Dimension(1)\n assert ps.get_dimension(1) == Dimension(2)\n assert ps.get_dimension(2) == Dimension(3)\n assert ps.get_dimension(3) == Dimension.dynamic()\n\n ps = PartialShape([1, 2, 3, -1])\n assert not ps.is_static\n assert ps.is_dynamic\n assert ps.all_non_negative\n assert ps.rank == 4\n assert list(ps.get_min_shape()) == [1, 2, 3, 0]\n assert list(ps.get_max_shape())[3] > 1000000000\n assert repr(ps) == \"<PartialShape: {1,2,3,?}>\"\n\n ps = PartialShape.dynamic()\n assert not ps.is_static\n assert ps.is_dynamic\n assert ps.rank == 
Dimension.dynamic()\n assert list(ps.get_min_shape()) == []\n assert list(ps.get_max_shape()) == []\n assert repr(ps) == \"<PartialShape: ?>\"\n\n ps = PartialShape.dynamic(r=Dimension(2))\n assert not ps.is_static\n assert ps.is_dynamic\n assert ps.rank == 2\n assert 2 == ps.rank\n assert list(ps.get_min_shape()) == [0, 0]\n assert list(ps.get_max_shape())[0] > 1000000000\n assert repr(ps) == \"<PartialShape: {?,?}>\"\n\n\ndef test_partial_shape_compatible():\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape.dynamic()\n assert ps1.compatible(ps2)\n\n ps1 = PartialShape([3])\n ps2 = PartialShape.dynamic()\n assert ps1.compatible(ps2)\n\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape([4])\n assert ps1.compatible(ps2)\n\n ps1 = PartialShape([2, -1, 3, -1, 5])\n ps2 = PartialShape([2, -1, -1, 4, 5])\n assert ps1.compatible(ps2)\n\n ps1 = PartialShape([2, -1, 3, -1, 5])\n ps2 = PartialShape([1, -1, -1, 4, 5])\n assert not ps1.compatible(ps2)\n\n\ndef test_partial_shape_same_scheme():\n ps1 = PartialShape([1, 2, -1])\n ps2 = PartialShape([1, 3, -1])\n assert not ps1.same_scheme(ps2)\n\n ps1 = PartialShape([1, 2, -1])\n ps2 = PartialShape([1, 2, -1])\n assert ps1.same_scheme(ps2)\n\n ps1 = PartialShape([1, 2, 3])\n ps2 = PartialShape([1, 2, 3])\n assert ps1.same_scheme(ps2)\n\n ps1 = PartialShape([-1, 2, 3])\n ps2 = PartialShape([1, -1, 3])\n assert not ps1.same_scheme(ps2)\n\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape.dynamic()\n assert ps1.same_scheme(ps2)\n\n\ndef test_partial_shape_refinement():\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape.dynamic()\n assert ps1.refines(ps2)\n assert ps1.relaxes(ps2)\n assert ps2.refines(ps1)\n assert ps2.relaxes(ps1)\n\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape([3, -1, 7, 9])\n assert not ps1.refines(ps2)\n assert ps1.relaxes(ps2)\n assert ps2.refines(ps1)\n assert not ps2.relaxes(ps1)\n\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape([3, 5, 7, 9])\n assert not ps1.refines(ps2)\n assert ps1.relaxes(ps2)\n assert ps2.refines(ps1)\n assert not ps2.relaxes(ps1)\n\n\ndef test_partial_shape_equals():\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape.dynamic()\n assert ps1 == ps2\n\n ps1 = PartialShape([1, 2, 3])\n ps2 = PartialShape([1, 2, 3])\n assert ps1 == ps2\n\n shape = Shape([1, 2, 3])\n ps = PartialShape([1, 2, 3])\n assert shape == ps\n\n\ndef test_repr_dynamic_shape():\n shape = PartialShape([-1, 2])\n parameter_a = ov.parameter(shape, dtype=np.float32, name=\"A\")\n parameter_b = ov.parameter(shape, dtype=np.float32, name=\"B\")\n model = parameter_a + parameter_b\n function = Function(model, [parameter_a, parameter_b], \"simple_dyn_shapes_graph\")\n\n assert repr(function) == \"<Function: 'simple_dyn_shapes_graph' ({?,2})>\"\n\n ops = function.get_ordered_ops()\n for op in ops:\n assert \"{?,2}\" in repr(op)\n\n\ndef test_discrete_type_info():\n data_shape = [6, 12, 10, 24]\n data_parameter = ov.parameter(data_shape, name=\"Data\", dtype=np.float32)\n k = np.int32(3)\n axis = np.int32(1)\n n1 = ov.topk(data_parameter, k, axis, \"max\", \"value\")\n n2 = ov.topk(data_parameter, k, axis, \"max\", \"value\")\n n3 = ov.sin(0.2)\n\n assert n1.type_info.name == \"TopK\"\n assert n3.type_info.name == \"Sin\"\n assert n1.get_type_info().name == \"TopK\"\n assert n3.get_type_info().name == \"Sin\"\n assert n1.type_info.name == n2.type_info.name\n assert n1.type_info.version == n2.type_info.version\n assert n1.type_info.parent == n2.type_info.parent\n assert n1.get_type_info().name == n2.get_type_info().name\n assert 
n1.get_type_info().version == n2.get_type_info().version\n assert n1.get_type_info().parent == n2.get_type_info().parent\n assert n1.get_type_info().name != n3.get_type_info().name\n assert n1.get_type_info().name > n3.get_type_info().name\n assert n1.get_type_info().name >= n3.get_type_info().name\n assert n3.get_type_info().name < n1.get_type_info().name\n assert n3.get_type_info().name <= n1.get_type_info().name\n",
"# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom extensions.ops.scatternd import ScatterNDUpdate\nfrom mo.front.common.replacement import FrontReplacementOp\nfrom mo.graph.graph import Node, Graph, rename_nodes\nfrom mo.ops.broadcast import Broadcast\nfrom mo.ops.const import Const\n\n\nclass SparseToDenseReplacer(FrontReplacementOp):\n \"\"\"\n This replacer substitutes TensorFlow SparseToDense operation with Broadcast -> ScatterND chain.\n The Broadcast operation creates a tensor filled with default value and of required shape.\n The ScatterND operation updates the created tensor with required values at required locations.\n \"\"\"\n op = \"SparseToDense\"\n enabled = True\n\n def run_after(self):\n from extensions.front.tf.CTCGreedyDecoderReplacement import CTCGreedyDecoderReplacement\n from extensions.front.tf.CTCLossReplacement import CTCLossReplacement\n return [CTCGreedyDecoderReplacement, CTCLossReplacement]\n\n def replace_op(self, graph: Graph, node: Node):\n node_name = node.soft_get('name', node.id)\n\n # broadcast default value to required shape\n broadcast_node = Broadcast(graph, {'name': node_name + '/Broadcast_'}).create_node()\n node.in_port(1).get_connection().set_destination(broadcast_node.in_port(1))\n if not node.in_port(3).disconnected():\n node.in_port(3).get_connection().set_destination(broadcast_node.in_port(0))\n else:\n broadcast_node.in_port(0).connect(Const(graph, {'name': broadcast_node.name + '/FillValue_',\n 'value': np.float32(0)}\n ).create_node().out_port(0))\n\n # update broadcasted tensor with required values at required locations\n scatternd_node = ScatterNDUpdate(graph, {'name': node_name + '/ScatterNDUpdate_'}).create_node()\n scatternd_node.in_port(0).connect(broadcast_node.out_port(0))\n node.in_port(0).get_connection().set_destination(scatternd_node.in_port(1))\n node.in_port(2).get_connection().set_destination(scatternd_node.in_port(2))\n\n rename_nodes([(node, node_name + \"/AbandonedName\"), (scatternd_node, node_name)])\n\n return [scatternd_node.id]\n",
"# Copyright (C) 2020-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import deque\n\nfrom copy import copy\nfrom functools import partial\nfrom itertools import cycle\nfrom typing import Dict\nfrom typing import List, Set\n\nimport numpy as np\nfrom extensions.back.ForceStrictPrecision import ForceStrictPrecision\nfrom extensions.back.compress_quantized_weights import CompressQuantizeWeights\nfrom extensions.ops.elementwise import Add\nfrom extensions.ops.Cast import Cast\nfrom extensions.ops.fakequantize import FakeQuantize\nfrom mo.back.replacement import BackReplacementPattern\nfrom mo.front.common.replacement import FrontReplacementSubgraph\nfrom mo.graph.graph import Graph, Node, rename_node\nfrom mo.graph.port import Port\nfrom mo.middle.pattern_match import apply_pattern\nfrom mo.ops.const import Const\nfrom mo.middle.passes.convert_data_type import convert_blob\nfrom mo.middle.passes.infer import type_infer\n\nfrom . import editor as ge\nfrom . import node_utils as nu\nfrom .pattern_utils import get_fq_result_pattern\nfrom .special_operations import OPERATIONS_WITH_WEIGHTS, DETECTION_OUTPUT_FINAL_TYPES, SPLIT_OPERATIONS\nfrom .utils import find_operation_matches, is_ignored, get_hw_aware_ignored_patterns\nfrom ..graph.node_utils import get_all_node_outputs, get_node_inputs, get_node_input, get_weights_for_node\nfrom ..graph.special_patterns import get_ignored_patterns\nfrom ..utils.logger import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass SaveBNStatistics(FrontReplacementSubgraph):\n enabled = True\n\n def pattern(self):\n return dict(\n nodes=[\n ('input', dict()),\n ('const_1', dict(op='Const')),\n ('const_2', dict(op='Const')),\n ('const_3', dict(op='Const')),\n ('const_4', dict(op='Const')),\n ('bn', dict(op=lambda x: x in ['FusedBatchNorm',\n 'BatchNorm',\n 'BatchNormalization'])),\n ],\n edges=[\n ('input', 'bn', {'in': 0}),\n ('const_1', 'bn', {'in': 1}),\n ('const_2', 'bn', {'in': 2}),\n ('const_3', 'bn', {'in': 3}),\n ('const_4', 'bn', {'in': 4}),\n ])\n\n def replace_sub_graph(self, _, match):\n input_node = match['input']\n\n bn = match['bn']\n const_1 = match['const_1']\n const_2 = match['const_2']\n const_3 = match['const_3']\n const_4 = match['const_4']\n\n input_node['bn_weights'] = {\n 'std': const_1.value,\n 'mean': const_2.value,\n 'input_std': const_3.value,\n 'input_mean': const_4.value\n }\n\n logger.debug('Save BN %s weights to %s node', bn.name, input_node.name)\n\n\nclass InsertFakeQuantize(BackReplacementPattern):\n\n enabled = False\n\n @property\n def quantize_operations(self):\n return getattr(self, '_quantize_operations', [])\n\n @quantize_operations.setter\n def quantize_operations(self, value):\n setattr(self, '_quantize_operations', value)\n\n @property\n def ignored_params(self):\n return getattr(self, '_ignored_params', {'skip_model': False, 'scope': [], 'operations': []})\n\n @ignored_params.setter\n def ignored_params(self, value):\n setattr(self, '_ignored_params', value)\n\n def pattern(self):\n op_types = []\n for op in self.quantize_operations:\n op_types.append(op['type'])\n\n return dict(\n nodes=[\n ('m_op', {'type': lambda x: x in op_types})\n ],\n edges=[]\n )\n\n @staticmethod\n def quantize_only_input(node: Node):\n if node.type in ['Interpolate', 'Power', 'ReduceMean', 'NormalizeL2',\n 'Assign', 'PReLU', 'ReLU', 'Sigmoid', 'Tanh', 'Clamp', 'MVN']:\n return True\n # ScaleSift case, FQ only for input\n if node.type == 'Multiply' and nu.check_input_data_is_const(node, 1):\n output_node = 
nu.get_node_output(node, 0)[0]\n if output_node.type == 'Add' and nu.check_input_data_is_const(output_node, 1):\n logger.debug('Scaleshift found at {}->{}'.format(node.name, output_node.name))\n return True\n return False\n\n def replace_pattern(self, graph: Graph, match: Dict[str, Node]):\n m_op = match['m_op']\n if not find_operation_matches(self.quantize_operations, m_op) \\\n or is_ignored(self.ignored_params, m_op):\n return\n\n if m_op.type in ['Convolution', 'ConvolutionBackpropData', 'MatMul']:\n insert_fake_quantize(graph, m_op, [0, 1], ['fq_input', 'fq_weights'])\n elif m_op.type == 'LSTMCell':\n insert_fake_quantize(graph, m_op, [0, 1, 2, 3, 4])\n elif self.quantize_only_input(m_op):\n insert_fake_quantize(graph, m_op, [0])\n else:\n insert_fake_quantize(graph, m_op)\n\n\nclass FakeQuantizePropagation(BackReplacementPattern):\n\n enabled = False\n\n def remove_node_and_reset_connections(self, graph, node: Node, in_port):\n node.in_port(0).disconnect()\n node.out_port(0).get_connection().set_source(in_port)\n graph.remove_node(node.id)\n\n def jump_to_first_input(self, graph, fq: Node) -> []:\n in_port = fq.in_port(0).get_source()\n op = in_port.node\n\n # Disconnect FQ from input and reconnect outputs to input node\n self.remove_node_and_reset_connections(graph, fq, in_port)\n\n return insert_fake_quantize(graph, op, [0])\n\n def jump_to_all_inputs(self, graph: Graph, fq: Node) -> []:\n in_port = fq.in_port(0).get_source()\n op = in_port.node\n\n # Disconnect FQ from input and reconnect outputs to input node\n self.remove_node_and_reset_connections(graph, fq, in_port)\n\n # Insert FQ operations for all inputs\n return insert_fake_quantize(graph, op)\n\n def jump_to_all_branch_except_const(self, graph, fq: Node) -> []:\n in_port = fq.in_port(0).get_source()\n op = in_port.node\n ports = [i for i in range(op.in_ports_count) if op.in_port(i).get_source() is not None and \\\n op.in_port(i).get_source().node.type != 'Const']\n\n # Disconnect FQ from input and reconnect outputs to input node\n self.remove_node_and_reset_connections(graph, fq, in_port)\n\n return insert_fake_quantize(graph, op, ports)\n\n def jump_over_split_concat(self, graph: Graph, fq: Node) -> []:\n in_port = fq.in_port(0).get_source()\n op = in_port.node\n\n # Disconnect FQ from input and reconnect outputs to input node\n self.remove_node_and_reset_connections(graph, fq, in_port)\n\n # Insert FQ operations for split input\n return insert_fake_quantize(graph, get_node_inputs(op)[0], [0])\n\n def remove_duplication(self, graph: Graph, fq: Node) -> []:\n # Keep only input operation\n fq.out_port(0).get_connection().set_source(fq.in_port(0).get_source())\n fq.in_port(0).disconnect()\n graph.remove_node(fq.id)\n return []\n\n def check_split_concat(self, node):\n if node.type != 'Concat':\n return False\n upper_ops = get_node_inputs(node)\n return upper_ops[0].type not in SPLIT_OPERATIONS \\\n and len({upper_op.name for upper_op in upper_ops}) == 1 \\\n and len({up_down.name for up_down in get_all_node_outputs(upper_ops[0])}) == 1\n\n jump_single_branch_ops = ['ReduceMax', 'MaxPool', 'Reshape', 'Flatten', 'Squeeze', 'Unsqueeze', 'Interpolate',\n 'Split', 'Crop', 'ReduceMean', 'AvgPool', 'Result', 'Tile', 'Transpose', 'StridedSlice',\n 'VariadicSplit', 'ShuffleChannels', 'Broadcast', 'Minimum', 'Maximum', 'DepthToSpace']\n remove_duplication_ops = ['FakeQuantize', 'Parameter']\n jump_multi_branch_ops = 'Concat'\n jump_multi_branch_ops_except_const = ['Pad', 'ConvertLike']\n jump_split_concat_ops = ('Split', 
'VariadicSplit', 'Concat')\n map_op_to_fn = {\n **dict(zip(jump_single_branch_ops, cycle([jump_to_first_input]))),\n **dict(zip(\n remove_duplication_ops, cycle([remove_duplication]))),\n jump_multi_branch_ops: jump_to_all_inputs,\n **dict(zip(\n jump_multi_branch_ops_except_const, cycle([jump_to_all_branch_except_const]))),\n jump_split_concat_ops: jump_over_split_concat\n }\n\n def delete_fq_non_quantizable_node_precision(self, graph):\n type_infer(graph)\n fq_removal = RemoveFakeQuantize()\n fq_removal.quantize_agnostic_operations = self.quantize_agnostic_operations\n fq_removal.quantize_operations = self.quantize_operations\n node_int_fq = []\n fq_queue = deque(sorted(graph.get_op_nodes(type='FakeQuantize'), key=lambda x: x.name))\n while fq_queue:\n fq = fq_queue.popleft()\n if fq.in_port(0).get_source() is not None and fq.in_port(0).get_source().is_data_type_defined():\n type_node = fq.in_port(0).get_source().get_data_type()\n if type_node in (np.int32, np.int64, bool):\n node_int_fq.append(fq.name)\n fq_removal.find_and_remove_node(graph, fq.name)\n\n @property\n def quantize_inputs(self):\n return getattr(self, '_quantize_inputs', False)\n\n @quantize_inputs.setter\n def quantize_inputs(self, value):\n setattr(self, '_quantize_inputs', value)\n\n @property\n def quantize_operations(self):\n return getattr(self, '_quantize_operations', None)\n\n @quantize_operations.setter\n def quantize_operations(self, value):\n setattr(self, '_quantize_operations', value)\n\n @property\n def quantize_agnostic_operations(self):\n return getattr(self, '_quantize_agnostic_operations', [])\n\n @quantize_agnostic_operations.setter\n def quantize_agnostic_operations(self, value):\n setattr(self, '_quantize_agnostic_operations', value)\n for op in value:\n if op['type'] not in self.map_op_to_fn:\n raise RuntimeError('FakeQuantizePropagation could not support operation {}'.format(op))\n\n def find_and_replace_pattern(self, graph: Graph):\n fq_queue = deque(sorted(graph.get_op_nodes(type='FakeQuantize'), key=lambda x: x.name))\n skip_ascent_map = self._create_skip_ascent_map(graph)\n # Iterate over FakeQuantize operations and push them on top while it's possible\n while fq_queue:\n # Get FakeQuantize from queue and it's input node\n fq = fq_queue.popleft()\n\n # In case if we already touched this FakeQuantize it could be disconnected from the main graph\n if fq.in_port(0).disconnected():\n continue\n\n input_node = fq.in_port(0).get_source().node\n input_type = input_node.type\n output_node = nu.get_node_output(fq, 0)[0]\n output_type = output_node.type\n\n # Check that input type is allowed from jumping over\n m_op = find_operation_matches(self.quantize_agnostic_operations, input_node)\n is_scaleshift = output_type == 'Multiply' and nu.get_node_output(output_node, 0)[0].type == 'Add'\n if len(m_op) > 1:\n raise RuntimeError(\n 'FakeQuantizePropagation matched several callback functions for operation {}'.format(input_node))\n if input_type not in self.remove_duplication_ops and \\\n skip_ascent_map[input_node.name]:\n continue\n if m_op \\\n or input_type == 'FakeQuantize' \\\n or (input_type == 'Parameter'\n and is_scaleshift\n and not self.quantize_inputs):\n input_name = input_node.name\n if self.check_split_concat(input_node):\n input_parent_name = get_node_inputs(input_node)[0].name\n if not skip_ascent_map[input_parent_name]:\n input_type = ('Split', 'VariadicSplit', 'Concat')\n input_name = (input_node.name, input_parent_name)\n logger.debug('FQ %s jumped over %s (%s)', fq.name, input_type, 
input_name)\n\n callback = self.map_op_to_fn[input_type]\n new_fq = callback(self, graph, fq)\n\n # Update queue with new (moved) FQ operations\n if isinstance(new_fq, list):\n for fq in new_fq:\n fq_queue.appendleft(fq)\n elif isinstance(new_fq, Node) and new_fq.type == 'FakeQuantize':\n fq_queue.appendleft(new_fq)\n else:\n raise RuntimeError(\n 'Unsupported response ({}) from callback {}.'.format(\n new_fq, self.map_op_to_fn[input_type]))\n\n def _create_skip_ascent_map(self, graph: Graph) -> {}:\n\n def _is_node_skippable(node, skip_ascent_map):\n skippable_ops = [*self.jump_single_branch_ops, self.jump_multi_branch_ops, 'FakeQuantize',\n *self.jump_multi_branch_ops_except_const]\n\n def sink_fn(op):\n out = []\n if op.type != 'FakeQuantize':\n out = [n for n in get_all_node_outputs(op) if n.type != 'ShapeOf']\n return out\n\n def source_fn(op):\n return [p for p in get_node_inputs(op)\n if p and p.type not in ['FakeQuantize', 'Const'] and p.name != node.name]\n\n def is_multibranch_fn(op):\n return op.type == self.jump_multi_branch_ops\n\n def has_fake_quantize_fn(op):\n return op.type == 'FakeQuantize'\n\n def not_skippable_op_fn(op):\n return op.type not in skippable_ops\n\n def process_multibranch_descendants(criteria):\n _skip_multibranch_ascent_ops = {}\n for name in criteria[is_multibranch_fn]:\n if name in skip_ascent_map:\n _skip_multibranch_ascent_ops[name] = skip_ascent_map[name]\n else:\n _skip_multibranch_ascent_ops[name] = _is_node_skippable(\n ge.get_node_by_name(graph, name, recursively=False), skip_ascent_map)\n skip_ascent_map.update(_skip_multibranch_ascent_ops)\n return any(_skip_multibranch_ascent_ops.values())\n\n def process_multibranch(op):\n if not traverse_graph(op, source_fn, not_skippable_op_fn)[0]:\n return False\n res, criteria = traverse_graph(\n op, sink_fn, not_skippable_op_fn,\n [is_multibranch_fn, has_fake_quantize_fn])\n if res or has_fake_quantize_fn not in criteria:\n return True\n if is_multibranch_fn in criteria:\n return process_multibranch_descendants(criteria)\n return has_fake_quantize_fn not in criteria\n\n def process_singlebranch(op):\n res, criteria = traverse_graph(\n op, sink_fn, not_skippable_op_fn, is_multibranch_fn)\n if criteria:\n return process_multibranch_descendants(criteria)\n return res\n\n def stop_fn(op):\n if op.type not in skippable_ops:\n return True\n if op.name == node.name:\n return False\n if is_multibranch_fn(op):\n return process_multibranch(op)\n return process_singlebranch(op)\n\n return traverse_graph(node, sink_fn, stop_fn)[0]\n\n skip_ascent = {}\n for op in graph.get_op_nodes():\n if 'skipped' in op and op['skipped']:\n skip_ascent[op.name] = True\n if op.name not in skip_ascent:\n skip_ascent[op.name] = _is_node_skippable(op, skip_ascent)\n\n return skip_ascent\n\n\nclass FakeQuantizeOptimization(BackReplacementPattern):\n\n enabled = False\n\n def find_and_replace_pattern(self, graph: Graph):\n for op in sorted(graph.get_op_nodes(), key=lambda x: x.name):\n for _, out_port in op.out_ports().items():\n if out_port.disconnected():\n continue\n # Get all consumers that are FakeQuantize\n fq_consumers = [in_port.node for in_port in out_port.get_destinations()\n if in_port.node.type == 'FakeQuantize' and in_port.idx == 0]\n fq_consumers = sorted(fq_consumers, key=lambda x: x.name)\n # Keep only first FakeQuantize and disconnect other\n for fq in fq_consumers[1:]:\n logger.debug('Removed useless FakeQuantize {}'.format(fq.name))\n fq.in_port(0).disconnect()\n 
fq.out_port(0).get_connection().set_source(fq_consumers[0].out_port(0))\n\n\nclass RemoveFakeQuantize:\n def find_and_remove_node(self, graph, node_name, force=False):\n node = ge.get_node_by_name(graph, node_name, recursively=False)\n if not node:\n return [], []\n\n if force:\n self.disconnect_fq_node(node)\n return [node_name], []\n\n nodes_to_cut, ops_in_orig_prec = self.find_fq_nodes_to_cut(node)\n for fq_node in nodes_to_cut:\n self.disconnect_fq_node(fq_node)\n self.undo_renaming(graph, fq_node)\n\n for op in ops_in_orig_prec:\n if op.type in ('Convolution', 'MatMul'):\n self.undo_bias_correction(op)\n self.undo_weights_rescaling(op)\n\n return [fq_node.name for fq_node in nodes_to_cut], [op.name for op in ops_in_orig_prec]\n\n def find_fq_nodes_to_cut(self, node):\n def parse_node_relatives(node, is_parents):\n if not node:\n return\n\n if find_operation_matches(self.quantize_operations, node):\n ops_to_return_in_orig_prec.add(node)\n\n seen_list = seen_parents if is_parents else seen_children\n relatives_ports = nu.get_node_input_ports(node) if is_parents else nu.get_node_output_ports(node)\n relatives_ports = [p for p in relatives_ports if p]\n for relative_port in relatives_ports:\n relative = relative_port.node\n if relative.type == 'FakeQuantize':\n if is_parents:\n if relative.name in seen_children:\n continue\n if relative not in to_cut:\n to_cut.append(relative)\n to_see_children.append(relative)\n else:\n seen_children.append(relative.name)\n elif relative.type != 'Const' and relative_port.data.get_value() is None:\n # Here, propagation to KSO subgraphs is blocked by checking the data value\n # which is None for input data propagated nodes.\n if relative.name not in seen_parents:\n to_see_parents.append(relative)\n if relative.name not in seen_children and \\\n find_operation_matches(self.quantize_agnostic_operations, relative):\n to_see_children.append(relative)\n seen_list.append(node.name)\n\n seen_children, seen_parents = [], []\n to_see_children, to_see_parents = [node], []\n to_cut = [node]\n ops_to_return_in_orig_prec = set()\n\n while to_see_parents or to_see_children:\n if to_see_children:\n node = to_see_children.pop()\n parse_node_relatives(node, is_parents=False)\n if to_see_parents:\n node = to_see_parents.pop()\n parse_node_relatives(node, is_parents=True)\n\n return to_cut, ops_to_return_in_orig_prec\n\n def disconnect_fq_node(self, fq_node):\n parent_node_port = fq_node.in_port(0).get_source()\n parent_node = parent_node_port.node\n fq_node.in_port(0).disconnect()\n for port in fq_node.out_ports().values():\n port.get_connection().set_source(parent_node_port)\n if parent_node.type == 'Const':\n parent_node['need_shape_inference'] = True\n\n @staticmethod\n def undo_bias_correction(conv_node):\n bias_node = nu.get_bias_for_node(conv_node)\n if bias_node and 'original_bias' in conv_node:\n nu.set_bias_for_node(conv_node, conv_node['original_bias'])\n\n @staticmethod\n def undo_weights_rescaling(conv_node):\n weights_node = nu.get_node_input(conv_node, 1)\n if weights_node.type == 'FakeQuantize':\n weights_node = nu.get_node_input(weights_node, 0)\n if 'scaling_factor' in conv_node:\n nu.set_node_value(weights_node, nu.get_node_value(weights_node) * conv_node['scaling_factor'])\n if 'wbc_mean_shift' in conv_node:\n original_weights = (nu.get_node_value(weights_node) - conv_node['wbc_mean_shift']) / \\\n conv_node['wbc_variance_shift']\n nu.set_node_value(weights_node, original_weights)\n\n @staticmethod\n def undo_renaming(graph, fq_node):\n if 'orig_fq_name' 
in fq_node:\n node = ge.get_node_by_name(graph,\n '{fq_name}/pre_fq_input'.format(fq_name=fq_node.fullname),\n recursively=False)\n rename_node(node, node['orig_node_name'])\n rename_node(fq_node, fq_node['orig_fq_name'])\n\n @property\n def quantize_agnostic_operations(self):\n return getattr(self, '_quantize_agnostic_operations', [])\n\n @quantize_agnostic_operations.setter\n def quantize_agnostic_operations(self, value):\n setattr(self, '_quantize_agnostic_operations', value)\n\n @property\n def quantize_operations(self):\n return getattr(self, '_quantize_operations', [])\n\n @quantize_operations.setter\n def quantize_operations(self, value):\n setattr(self, '_quantize_operations', value)\n\n\nclass SpecialBlocksMarker:\n @staticmethod\n def mark_block_nodes(check_pattern_fn, _, match):\n if check_pattern_fn and check_pattern_fn(match):\n return\n match_list = [match[node] for node in match if node not in ['input', 'output']]\n for node in match_list:\n if node.kind == 'op':\n node['skipped'] = True\n\n def mark_ignored_blocks(self, graph, target_device):\n def mark_ignored_blocks_(patterns):\n for types_list in patterns:\n for pattern in patterns[types_list]:\n if isinstance(pattern, tuple):\n pattern, check_pattern_fn = pattern\n mark_fn = partial(self.mark_block_nodes, check_pattern_fn)\n else:\n mark_fn = partial(self.mark_block_nodes, None)\n apply_pattern(\n graph,\n nodes=pattern['nodes'],\n edges=pattern['edges'],\n action=mark_fn\n )\n\n def mark_detection_output_blocks_(graph):\n det_out_finals = ge.get_nodes_by_type(graph, [op['type'] for op in DETECTION_OUTPUT_FINAL_TYPES])\n stop_propagation_types = [op['type'] for op in OPERATIONS_WITH_WEIGHTS]\n stop_propagation_types.append('Const')\n\n def move_fn(op):\n return [node for node in nu.get_node_inputs(op) if\n node is not None and node.type not in stop_propagation_types]\n\n def stop_fn(op):\n op['skipped'] = True\n return False\n\n for det_out_final in det_out_finals:\n traverse_graph(det_out_final, move_fn, stop_fn)\n\n mark_ignored_blocks_(get_hw_aware_ignored_patterns(target_device))\n mark_ignored_blocks_(get_ignored_patterns())\n mark_detection_output_blocks_(graph)\n\n\nclass MatMulPreprocessing(BackReplacementPattern):\n enabled = False\n\n def pattern(self):\n return dict(\n nodes=[\n ('matmul', {'kind': 'op', 'type': 'MatMul'})\n ],\n edges=[],\n )\n\n def replace_pattern(self, graph: Graph, match: Dict[str, Node]):\n matmul = match['matmul']\n input_const = nu.get_node_input(matmul, 1)\n if input_const.type == 'Const' \\\n and len(input_const.shape) > 1 \\\n and not matmul['transpose_b']:\n matmul['transpose_b'] = not matmul['transpose_b']\n matmul_w = nu.get_node_input(matmul, 1)\n matmul_w_value = nu.get_node_value(matmul_w)\n matmul_w_value = np.moveaxis(matmul_w_value, -2, -1)\n nu.set_node_value(matmul_w, matmul_w_value)\n return graph\n\n\nclass IgnoreShapeSubgraph(BackReplacementPattern):\n enabled = False\n\n def pattern(self):\n return dict(\n nodes=[\n ('shape', {'kind': 'op', 'type': 'ShapeOf'})\n ],\n edges=[],\n )\n\n def replace_pattern(self, graph: Graph, match: Dict[str, Node]):\n shape = match['shape']\n all_shape_nodes = find_shape_subgraph_endpoints([shape.out_port(0)])\n for node in all_shape_nodes:\n node['skipped'] = True\n return graph\n\n\nclass ModelPreprocessor(BackReplacementPattern):\n \"\"\"\n Performing equivalent graph transformation needed for further work.\n \"\"\"\n enabled = False\n\n def find_and_replace_pattern(self, graph: Graph):\n 
MatMulPreprocessing().find_and_replace_pattern(graph)\n IgnoreShapeSubgraph().find_and_replace_pattern(graph)\n InsertBiasNode().insert_null_biases(graph)\n\n\nclass InsertBiasNode:\n\n def insert_null_biases(self, graph: Graph):\n # Get nodes by type only for Convolutions instead of OPERATIONS_WITH_BIAS\n for node in ge.get_nodes_by_type(graph, ['Convolution']):\n if not nu.get_bias_for_node(node):\n create_bias_node(graph, node)\n\n\nclass FakeQuantizeNameSwapper(BackReplacementPattern):\n \"\"\"\n Performing equivalent graph transformation needed for further work.\n \"\"\"\n enabled = False\n\n def rename_fqs_in_the_end(self, graph: Graph):\n def change_names(_, match):\n fq_node = match['fq']\n input_node = get_node_input(fq_node, 0)\n new_fq_name = copy(input_node.name)\n if 'orig_node_name' in input_node:\n new_fq_name = copy(input_node['orig_node_name'])\n\n input_node_outputs = get_all_node_outputs(input_node)\n if len(input_node_outputs) > 1 and all([op.type == 'FakeQuantize' for op in input_node_outputs]):\n new_fq_name += '.{}'.format(fq_node.in_port(0).get_source().idx)\n\n fq_node['orig_fq_name'] = copy(fq_node.name)\n rename_node(fq_node, new_fq_name)\n\n if 'orig_node_name' not in input_node:\n input_node['orig_node_name'] = copy(input_node.name)\n rename_node(input_node, f'{input_node.name}/pre_fq_input')\n\n pattern = get_fq_result_pattern()\n apply_pattern(\n graph,\n nodes=pattern['nodes'],\n edges=pattern['edges'],\n action=change_names\n )\n\n\ndef create_bias_node(graph: Graph, src_node):\n logger.debug('Creating new bias for {}'.format(src_node.name))\n destination_ports = []\n for dest_port in src_node.out_port(0).get_destinations():\n destination_ports.append(dest_port)\n\n # Create Add and constant with zero bias\n bias_shape = src_node.out_port(0).data.get_shape()\n add_bias_shape = [1] * len(bias_shape)\n add_bias_shape[1] = bias_shape[1]\n weights = get_weights_for_node(src_node)\n bias_dtype = np.float32\n if weights and weights.out_port(0).is_data_type_defined():\n bias_dtype = weights.out_port(0).get_data_type()\n add_bias = Const(graph,\n {'value': np.zeros(add_bias_shape, dtype=bias_dtype),\n 'shape': add_bias_shape,\n 'need_shape_inference': True\n }).create_node()\n add_op = Add(graph, {'name': src_node.name + '/add_',\n 'need_shape_inference': True}).create_node()\n\n # Connect Const to Add node\n add_op.in_port(1).connect(add_bias.out_port(0))\n\n # Reconnect src_node -> output to src_node -> Add -> output\n src_node.out_port(0).disconnect()\n src_node.out_port(0).get_connection().set_destination(add_op.in_port(0))\n\n for destination_port in destination_ports:\n add_op.out_port(0).connect(destination_port)\n add_bias.out_node(0)['Insert_Convert_operation_after'] = True\n\n\ndef create_fake_quantize_node(graph: Graph, name):\n fq = FakeQuantize(graph, {'name': name, 'levels': 0,\n 'stop_value_propagation': True}).create_node()\n\n input_low = Const(graph, {'value': np.array(0.0).astype(np.float32)}).create_node()\n input_height = Const(graph, {'value': np.array(0.0).astype(np.float32)}).create_node()\n output_low = Const(graph, {'value': np.array(0.0).astype(np.float32)}).create_node()\n output_height = Const(graph, {'value': np.array(0.0).astype(np.float32)}).create_node()\n\n input_low.out_port(0).connect(fq.in_port(1))\n input_height.out_port(0).connect(fq.in_port(2))\n output_low.out_port(0).connect(fq.in_port(3))\n output_height.out_port(0).connect(fq.in_port(4))\n\n input_low.infer(input_low)\n input_height.infer(input_height)\n 
output_low.infer(output_low)\n output_height.infer(output_height)\n\n return fq\n\n\ndef insert_fake_quantize(graph, node, ports=None, names=None):\n blobs_as_inputs_nodes_type = ['Convolution', 'Deconvolution', 'MatMul']\n\n port_name = None\n if ports is not None and names is not None:\n port_name = dict(zip(ports, names))\n\n new_fq = []\n for idx, port in node.in_ports().items():\n if port.disconnected():\n continue\n\n # Temporary WA while blobs_as_inputs option isn't work properly\n if node.type in blobs_as_inputs_nodes_type:\n if 'bin' in node.in_edges()[idx]:\n del node.in_edges()[idx]['bin']\n\n if ports is not None and idx not in ports:\n continue\n\n # This condition blocks FQ insertion after the keep_shape_ops (KSO) generated sub-graph\n # to avoid quantization of integer-like tensors\n if port.get_source().node.type != 'Const' and port.data.get_value() is not None:\n continue\n\n name = 'fq_input'\n if port_name is not None and idx in port_name:\n name = port_name[idx]\n\n # Create FakeQuantize operations\n fq_input = create_fake_quantize_node(\n graph, '{node_name}/{name}_{idx}'.format(node_name=node.name, name=name, idx=idx))\n\n # Insert FakeQuantize after input\n if node.type == 'Result':\n in_port = port.get_source()\n port.get_connection().set_source(fq_input.out_port(0))\n in_port.connect(fq_input.in_port(0))\n else:\n port.get_connection().set_destination(fq_input.in_port(0))\n fq_input.out_port(0).connect(port)\n\n fq_input.infer(fq_input)\n\n new_fq.append(fq_input)\n return new_fq\n\n\ndef traverse_graph(node, move_fn, stop_criteria_fn=None, criteria_fns=None):\n \"\"\" Traverse through graph dependent on move_fn\n :param node: node to start floating or sinking with some rule\n :param move_fn: function to get relatives (children, parents or some subset of them)\n to make traverse through graph. Function should have node as argument.\n You can make traverse up/down or whatever you want using this function.\n :param stop_criteria_fn: function to stop traversing and return boolean result.\n Function should have node as argument.\n :param criteria_fns: list of functions or just function with specified criteria for nodes.\n Returns True if criteria was satisfied at least at one node, False otherwise\n :return pair of values. The first one is a boolean value. In case stop criteria was satisfied\n the value is True, False otherwise.\n The second value is a dict of criteria values. 
The keys are function itself, node names where\n criteria was satisfied is used as dict values.\n \"\"\"\n\n if criteria_fns and callable(criteria_fns):\n criteria_fns = [criteria_fns]\n\n criteria_res = {}\n\n def apply_criteria_fn(n):\n if not criteria_fns:\n return\n for fn in criteria_fns:\n node_name = n.name if fn(n) else None\n if node_name:\n if fn in criteria_res:\n criteria_res[fn].append(node_name)\n else:\n criteria_res[fn] = [node_name]\n\n def stop_traverse_fn(n):\n if stop_criteria_fn:\n return stop_criteria_fn(n)\n return False\n\n queue, visited = [node], []\n\n while queue:\n current_node = queue.pop(0)\n if current_node.name in visited:\n continue\n\n relatives = move_fn(current_node)\n visited.append(current_node.name)\n\n if relatives:\n for r in relatives:\n apply_criteria_fn(r)\n if stop_traverse_fn(r):\n return True, criteria_res\n queue += relatives\n return False, criteria_res\n\n\ndef compress_weights(model: Graph):\n \"\"\"Apply transformations to save model weights to INT8.\"\"\"\n CompressQuantizeWeights().find_and_replace_pattern(model)\n model.clean_up()\n ForceStrictPrecision().find_and_replace_pattern(model)\n model.clean_up()\n\n\ndef get_next_in_ports(in_port: Port) -> Set[Port]:\n next_in_ports = set()\n for out_port in in_port.node.out_ports().values():\n next_in_ports.update(out_port.get_destinations())\n return next_in_ports\n\n\ndef find_shape_subgraph_endpoints(out_ports: List[Port], visited: set = None) -> Set[Port]:\n \"\"\"\n Searches for input ports of data dependent operations starting from output ports passed to the function.\n Condition for data dependent operations is absence of node output value.\n\n :param out_ports: list of output ports to start search from\n :param visited: set of input ports that were visited to avoid visiting them more than once\n :return: set of all nodes that are part of shape calculating subgraph\n \"\"\"\n if visited is None:\n visited = set()\n\n deque_of_in_ports = deque()\n for out_port in out_ports:\n deque_of_in_ports.extend(out_port.get_destinations())\n\n end_points_in_ports = set()\n visited_nodes = set()\n while deque_of_in_ports:\n in_port = deque_of_in_ports.popleft()\n if in_port in visited:\n continue\n\n next_in_ports = get_next_in_ports(in_port)\n if any([port.data.get_value() is None for port in next_in_ports]):\n end_points_in_ports.add(in_port)\n else:\n deque_of_in_ports.extend(next_in_ports)\n visited_nodes.add(in_port.node)\n visited.add(in_port)\n return visited_nodes\n\n\ndef remove_converts(graph: Graph):\n for op in graph.get_op_nodes(type='Convert'):\n source_op = op.in_port(0).get_source().node\n if source_op.type == 'Const' and source_op.data_type == np.float16:\n # Get access to data node after Convert operation and set Insert_Convert_operation_after\n # to restore Convert operation later\n op.out_node(0)['Insert_Convert_operation_after'] = True\n # Mark Const and Convert operation to fold them\n source_op['need_shape_inference'] = True\n op['stop_value_propagation'] = False\n op['need_shape_inference'] = True\n graph.clean_up()\n\n\ndef add_removed_converts(graph: Graph):\n for data_node_name in graph.get_nodes_with_attributes(Insert_Convert_operation_after=True):\n data_node = Node(graph, data_node_name)\n # Get access to Const node connected to data node\n const_op = data_node.in_node(0)\n assert const_op.data_type == np.float32, \"Error when try to insert Convert operation after Const: {}\".\\\n format(const_op.soft_get('name'))\n\n convert_op = Cast(graph, {'dst_type': 
np.float32,\n 'name': const_op.name + '/restored_convert',\n 'stop_value_propagation': True}).create_node()\n\n # Insert Convert operation after Const operation\n consumer_port = const_op.out_port(0).get_connection().get_destination()\n const_op.out_port(0).get_connection().set_destination(convert_op.in_port(0))\n convert_op.out_port(0).connect(consumer_port)\n\n # Convert Const value to FP32 to make types in graph consistent\n const_op.value, _, _ = convert_blob(const_op.value, np.float16)\n const_op.infer(const_op)\n"
] | [
[
"numpy.int32"
],
[
"numpy.float32"
],
[
"numpy.array",
"numpy.zeros",
"numpy.moveaxis"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
majkee15/HiddenMarkovJumpProcess-RLEnvironment | [
"730ef636bfa51f6137268ab7760f9a504ba583db"
] | [
"control/base.py"
] | [
"import os\n\nimport logging\nimport datetime\n\n\nimport numpy as np\nimport tensorflow as tf\nfrom gym.spaces import Box, Discrete\nfrom gym.utils import colorize\n\nfrom control.utils.misc import Config\nfrom control.utils.misc import REPO_ROOT, RESOURCE_ROOT\n\nfrom abc import ABC, abstractmethod\n\n\n\nclass TrainConfigBase(Config):\n lr = 0.001\n n_steps = 10000\n warmup_steps = 5000\n batch_size = 64\n log_every_step = 1000\n\n # give an extra bonus if done; only needed for certain tasks.\n done_reward = None\n\n\nclass Policy(ABC):\n\n def __init__(self, env, name, training=True, deterministic=False):\n self.env = env\n\n self.training = training\n self.name = self.__class__.__name__ + '--' + name\n\n if deterministic:\n np.random.seed(1)\n\n # Logger\n self.logger = logging.getLogger(name)\n logging.basicConfig()\n self.logger.setLevel(os.getenv('LOG_LEVEL', 'INFO'))\n # self.logger.info('Instantiated class ' + self.__class__.__name__)\n\n @property\n def act_size(self):\n # number of options of an action; this only makes sense for discrete actions.\n if isinstance(self.env.action_space, Discrete):\n return self.env.action_space.n\n else:\n return None\n\n @property\n def act_dim(self):\n # dimension of an action; this only makes sense for continuous actions.\n if isinstance(self.env.action_space, Box):\n return list(self.env.action_space.shape)\n else:\n return []\n\n @property\n def state_dim(self):\n # dimension of a state.\n return list(self.env.observation_space.shape)\n\n @staticmethod\n def obs_to_inputs(self, ob):\n return ob.flatten()\n\n @abstractmethod\n def get_action(self, state, **kwargs):\n pass\n\n @abstractmethod\n def build(self):\n pass\n\n @abstractmethod\n def train(self, *args, **kwargs):\n pass\n\n\n def evaluate(self, n_episodes):\n # TODO: evaluate uses default setting of the environment, i.g., random start\n # this should be done in parallel\n # and it should be depending on a starting state!\n reward_history = []\n\n for i in range(n_episodes):\n ob = self.env.reset()\n done = False\n reward = 0.\n while not done:\n a, q = self.get_action(ob, epsilon=0.0)\n new_ob, r, done, _ = self.env.step(a)\n # self.env.render()\n reward += r\n ob = new_ob\n\n reward_history.append(reward)\n\n #print(\"Avg. reward over {} episodes: {:.4f}\".format(n_episodes, np.mean(reward_history)))\n self.logger.info(\"Avg. reward over {} episodes: {:.4f}\".format(n_episodes, np.mean(reward_history)))\n return reward_history\n\n\nclass BaseModelMixin(ABC):\n\n def __init__(self, model_name, experiment_name=None):\n self._saver = None\n self._writer = None\n self._experiment_name = experiment_name\n self.model_name = model_name\n self.current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n def _get_dir(self, dir_name):\n if self._experiment_name is not None:\n path = os.path.join(RESOURCE_ROOT, dir_name, self._experiment_name, self.model_name, self.current_time)\n else:\n path = os.path.join(RESOURCE_ROOT, dir_name, self.model_name, self.current_time)\n os.makedirs(path, exist_ok=True)\n return path\n\n @property\n def log_dir(self):\n return self._get_dir('training_logs')\n\n @property\n def checkpoint_dir(self):\n return self._get_dir('checkpoints')\n\n @property\n def model_dir(self):\n return self._get_dir('models')\n\n @property\n def tb_dir(self):\n # tensorboard\n return self._get_dir('tb_logs')\n\n @property\n def writer(self):\n if self._writer is None:\n self._writer = tf.summary.create_file_writer(self.tb_dir)\n return self._writer\n"
] | [
[
"numpy.mean",
"numpy.random.seed",
"tensorflow.summary.create_file_writer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tenpercent/pytorch | [
"7f996b855c5070ab4a6bea0f451c8a22c0ce2394",
"7f996b855c5070ab4a6bea0f451c8a22c0ce2394",
"7f996b855c5070ab4a6bea0f451c8a22c0ce2394"
] | [
"test/test_ops_jit.py",
"test/mobile/model_test/math_ops.py",
"test/mobile/model_test/nn_ops.py"
] | [
"# Owner(s): [\"module: unknown\"]\n\nfrom functools import partial\n\nimport torch\n\nfrom torch.testing import FileCheck\nfrom torch.testing._internal.common_utils import \\\n (run_tests, IS_SANDCASTLE, clone_input_helper, first_sample)\nfrom torch.testing._internal.common_methods_invocations import op_db\nfrom torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes\nfrom torch.testing._internal.common_jit import JitCommonTestCase, check_against_reference\nfrom torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, check_alias_annotation\nfrom torch.testing._internal.jit_utils import disable_autodiff_subgraph_inlining, is_lambda\n\n\n# TODO: fixme https://github.com/pytorch/pytorch/issues/68972\ntorch.set_default_dtype(torch.float32)\n\n# variant testing is only done with torch.float and torch.cfloat to avoid\n# excessive test times and maximize signal to noise ratio\n_variant_ops = partial(ops, dtypes=OpDTypes.supported,\n allowed_dtypes=(torch.float, torch.cfloat))\n\n\n\n# Tests operators for consistency between JIT and eager, also checks\n# correctness of JIT specific alias schemas and intended\n# autodifferentiation behavior.\n# Inherits from JitCommonTestCase instead of TestCase directly to share\n# functionality with original test_jit.py method operator tests\nclass TestJit(JitCommonTestCase):\n exact_dtype = True\n\n # Tests that the forward and backward passes of operations produce the\n # same values for the cross-product of op variants (function, method, inplace)\n # and runtimes (eager, traced, scripted).\n # TODO WARNING: inplace x {traced, scripted} not currently tested\n @_variant_ops(op_db)\n def test_variant_consistency_jit(self, device, dtype, op):\n _requires_grad = op.supports_autograd and (dtype.is_floating_point or\n op.supports_complex_autograd(torch.device(device).type))\n\n include_conjugated_inputs = op.test_conjugated_samples and dtype.is_complex\n samples = op.sample_inputs(device, dtype, requires_grad=_requires_grad, include_conjugated_inputs=include_conjugated_inputs)\n\n # Acquires variants to test\n func = op.get_op()\n method = op.get_method()\n variants = {\n # TODO: inplace tests currently fail, fix and add inplace variant\n 'function': func, 'method': method,\n }\n\n # TODO: find better way to standardize on op registration itself..\n has_fake_function = op.name in [\"resize_\", 'resize_as_']\n\n if has_fake_function:\n variants = {'method': getattr(torch.Tensor, op.name)}\n samples = op.sample_inputs(device, dtype, requires_grad=False)\n\n support_script = op.supports_scripting\n\n tested = False\n for sample in samples:\n # Test traced and scripted consistency\n for func_type, variant in variants.items():\n if variant is None:\n continue\n\n # scripting and check_alias_analysis do not work with lambdas\n # lambdas are typically used as a way to simulate methods without\n # functional variants, so rely on the other variant for testing\n # for now\n if is_lambda(variant):\n continue\n\n tested = True\n\n # Create accessor for script function variant\n name = op.name + '_' if func_type == 'inplace' else op.name\n\n # run with disable_autodiff_subgraph_inlining(True) to test\n # autodiff support. 
Context manager forces the graph to contain\n # DifferentiableGraph nodes if they are present\n with disable_autodiff_subgraph_inlining():\n # Check scripted forward, grad, and grad grad\n if support_script:\n script_fn = create_script_fn(self, name, func_type)\n\n def out_fn(output):\n # Processes the output for autograd\n if sample.output_process_fn_grad is not None:\n return sample.output_process_fn_grad(output)\n return output\n\n def get_sample():\n return clone_input_helper(sample.input) if op.name[-1] == '_' else sample.input\n\n if support_script:\n check_against_reference(self,\n script_fn,\n func,\n out_fn,\n (get_sample(),) + sample.args,\n sample.kwargs,\n no_grad=not _requires_grad, no_gradgrad=not op.supports_gradgrad)\n\n # Check traced forward, grad, and grad grad\n # TODO: fix tracing here\n supports_tracing = not has_fake_function\n if op.assert_jit_shape_analysis:\n self.assertTrue(supports_tracing)\n\n if supports_tracing:\n traced_fn = create_traced_fn(self, variant)\n check_against_reference(self,\n traced_fn,\n func,\n out_fn,\n (get_sample(),) + sample.args,\n sample.kwargs,\n no_grad=not _requires_grad, no_gradgrad=not op.supports_gradgrad)\n\n # Check alias annotation schema for correctness (make\n # sure inputs that aren't supposed to be modified aren't)\n # Note: only runs in float32 because schema isn't affected by dtype,\n # so running it on all dtypes is would be excessive\n if dtype == torch.float32:\n # TODO: no reason why we cant run this with tracing graph\n if support_script and op.name != \"rsub\":\n check_alias_annotation(name, (get_sample(),) + sample.args, sample.kwargs,\n func_type=func_type, aten_name=op.aten_name)\n\n # TODO: use script graph as well\n checked_shape_analysis = False\n if supports_tracing:\n out = variant(get_sample(), *sample.args, **sample.kwargs)\n\n # right now, tuple of outputs and tensor output supported\n # TODO: list of tensor outputs\n tuple_of_tensors = isinstance(out, tuple) and all([isinstance(elem, torch.Tensor) for elem in out])\n\n if isinstance(out, torch.Tensor) or tuple_of_tensors:\n if tuple_of_tensors:\n sizes = [elem.size() for elem in out]\n else:\n sizes = out.size()\n self.checkShapeAnalysis(sizes, traced_fn.graph, op.assert_jit_shape_analysis)\n checked_shape_analysis = True\n if op.assert_jit_shape_analysis:\n self.assertTrue(checked_shape_analysis)\n\n # Check autodifferentiation of nodes for traced and scripted graphs, only need to check once per sample\n if dtype is torch.float32:\n # Sandcastle doesn't fuse nodes\n if IS_SANDCASTLE:\n # fusible nodes are expected to be found in FusionGroups in the DifferentiableGraphs\n nonfusible_nodes = op.autodiff_nonfusible_nodes + op.autodiff_fusible_nodes\n fusible_nodes = []\n else:\n nonfusible_nodes = op.autodiff_nonfusible_nodes\n fusible_nodes = op.autodiff_fusible_nodes\n\n if supports_tracing:\n self.assertAutodiffNode(traced_fn.last_graph, op.assert_autodiffed, nonfusible_nodes, fusible_nodes)\n if support_script:\n self.assertAutodiffNode(script_fn.last_graph, op.assert_autodiffed, nonfusible_nodes, fusible_nodes)\n assert tested, \"JIT Test does not execute any logic\"\n\n # alias testing is only done with torch.float for the same reason\n _alias_ops = partial(ops, dtypes=OpDTypes.supported,\n allowed_dtypes=(torch.float,))\n\n @_alias_ops((op for op in op_db if op.aliases))\n def test_jit_alias_remapping(self, device, dtype, op):\n # Required to avoid undefined value: tensor error in JIT compilation of the function template\n tensor = torch.tensor\n\n 
# NOTE: only tests on first sample\n samples = op.sample_inputs(device, dtype, requires_grad=True)\n sample = first_sample(self, samples)\n\n # [Scripting Data Preparation]\n # Prepare data for test scripting\n # Below we prepare strings of args/kwargs with and without type annotations.\n # These strings are inserted into function template strings which is then torch scripted.\n # - args string is [\"t0\"] corresponding to the \"input\" tensor required by the op\n # - args_kw is the value of args and strings of kwargs used to call the op (without type annotations), for example,\n # [\"to\", \"1.0\", \"(1,)\", \"True\", \"tensor(1.0)\"] -> def fn(t0): return variant(t0, 1.0, (1,), True, tensor(1.0))\n args = [\"t0\"]\n\n def quote_strs(v):\n if isinstance(v, str):\n return f\"'{v}'\"\n\n return str(v)\n\n args_kw = args + \\\n [f\"{v}\" for v in sample.args] + \\\n [f\"{k}={quote_strs(v)}\" for k, v in sample.kwargs.items()]\n\n # Prepare data for test tracing\n sample_args_kwargs = ()\n if len(sample.args) > 0:\n sample_args_kwargs += (sample.args, )\n if len(sample.kwargs) > 0:\n sample_args_kwargs += (sample.kwargs, )\n\n original_name = op.aten_name\n original_name_inplace = original_name + \"_\"\n expected_dtype = op(sample.input, *sample.args, **sample.kwargs).dtype\n\n for a_op in op.aliases:\n inplace = a_op.inplace_variant\n method_or_inplace = [a_op.inplace_variant, a_op.method_variant]\n variants = (v for v in (a_op.op, a_op.method_variant, a_op.inplace_variant) if v is not None)\n\n # Test scripting:\n for variant in variants:\n variant_name = variant.__name__\n op_name = original_name_inplace if variant is inplace else original_name\n\n if variant in method_or_inplace:\n fn_template = '''\n def _fn(t0{c}):\n return t0.{alias_name}({args_kw})\n '''\n # remove the first input tensor\n script = fn_template.format(\n c=\", \" if len(args_kw[1:]) > 1 else \"\",\n args_kw=\", \".join(args_kw[1:]),\n alias_name=variant_name,\n )\n else:\n fn_template = '''\n def _fn({args}):\n return variant({args_kw})\n '''\n script = fn_template.format(\n args=\", \".join(args),\n args_kw=\", \".join(args_kw),\n )\n scripted = torch.jit.CompilationUnit(script)._fn\n\n if (variant is inplace and not torch.can_cast(expected_dtype, dtype)):\n try:\n inp = clone_input_helper(sample.input)\n scripted(inp)\n except Exception as e:\n continue\n self.fail(\"Inplace operation on integer tensor that should be promoted to float didn't fail!\")\n\n inp = clone_input_helper(sample.input)\n scripted(inp)\n inp = clone_input_helper(sample.input)\n graph = scripted.graph_for(inp)\n FileCheck().check(op.aten_name).check_not(variant_name).run(graph)\n\n # Test tracing:\n for variant in variants:\n variant_name = variant.__name__\n op_name = original_name_inplace if variant is inplace else original_name\n\n def _fn(*sample_args, **sample_kwargs):\n return variant(*sample_args, **sample_kwargs)\n\n inp = (clone_input_helper(sample.input),) + sample_args_kwargs\n traced = torch.jit.trace(_fn, *inp)\n inp = (clone_input_helper(sample.input),) + sample_args_kwargs\n traced(*inp)\n inp = (clone_input_helper(sample.input),) + sample_args_kwargs\n graph = traced.graph_for(*inp)\n FileCheck().check(op_name).check_not(variant_name).run(graph)\n\n\ninstantiate_device_type_tests(TestJit, globals())\n\nif __name__ == '__main__':\n run_tests()\n",
"# https://pytorch.org/docs/stable/torch.html#math-operations\n\nimport math\n\nimport torch\n\n\nclass PointwiseOpsModule(torch.nn.Module):\n def __init__(self):\n super(PointwiseOpsModule, self).__init__()\n\n def forward(self):\n return self.pointwise_ops()\n\n def pointwise_ops(self):\n a = torch.randn(4)\n b = torch.randn(4)\n t = torch.tensor([-1, -2, 3], dtype=torch.int8)\n r = torch.tensor([0, 1, 10, 0], dtype=torch.int8)\n t = torch.tensor([-1, -2, 3], dtype=torch.int8)\n s = torch.tensor([4, 0, 1, 0], dtype=torch.int8)\n f = torch.zeros(3)\n g = torch.tensor([-1, 0, 1])\n w = torch.tensor([0.3810, 1.2774, -0.2972, -0.3719, 0.4637])\n return (\n torch.abs(torch.tensor([-1, -2, 3])),\n torch.absolute(torch.tensor([-1, -2, 3])),\n torch.acos(a),\n torch.arccos(a),\n torch.acosh(a.uniform_(1.0, 2.0)),\n torch.add(a, 20),\n torch.add(a, b, out=a),\n b.add(a),\n b.add(a, out=b),\n b.add_(a),\n b.add(1),\n torch.add(a, torch.randn(4, 1), alpha=10),\n torch.addcdiv(\n torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1\n ),\n torch.addcmul(\n torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1\n ),\n torch.angle(a),\n torch.asin(a),\n torch.arcsin(a),\n torch.asinh(a),\n torch.arcsinh(a),\n torch.atan(a),\n torch.arctan(a),\n torch.atanh(a.uniform_(-1.0, 1.0)),\n torch.arctanh(a.uniform_(-1.0, 1.0)),\n torch.atan2(a, a),\n torch.bitwise_not(t),\n torch.bitwise_and(t, torch.tensor([1, 0, 3], dtype=torch.int8)),\n torch.bitwise_or(t, torch.tensor([1, 0, 3], dtype=torch.int8)),\n torch.bitwise_xor(t, torch.tensor([1, 0, 3], dtype=torch.int8)),\n torch.ceil(a),\n torch.ceil(float(torch.tensor(0.5))),\n torch.ceil(torch.tensor(0.5).item()),\n torch.clamp(a, min=-0.5, max=0.5),\n torch.clamp(a, min=0.5),\n torch.clamp(a, max=0.5),\n torch.clip(a, min=-0.5, max=0.5),\n torch.conj(a),\n torch.copysign(a, 1),\n torch.copysign(a, b),\n torch.cos(a),\n torch.cosh(a),\n torch.deg2rad(\n torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])\n ),\n torch.div(a, b),\n a.div(b),\n a.div(1),\n a.div_(b),\n torch.divide(a, b, rounding_mode=\"trunc\"),\n torch.divide(a, b, rounding_mode=\"floor\"),\n torch.digamma(torch.tensor([1.0, 0.5])),\n torch.erf(torch.tensor([0.0, -1.0, 10.0])),\n torch.erfc(torch.tensor([0.0, -1.0, 10.0])),\n torch.erfinv(torch.tensor([0.0, 0.5, -1.0])),\n torch.exp(torch.tensor([0.0, math.log(2.0)])),\n torch.exp(float(torch.tensor(1))),\n torch.exp2(torch.tensor([0.0, math.log(2.0), 3.0, 4.0])),\n torch.expm1(torch.tensor([0.0, math.log(2.0)])),\n torch.fake_quantize_per_channel_affine(\n torch.randn(2, 2, 2),\n (torch.randn(2) + 1) * 0.05,\n torch.zeros(2),\n 1,\n 0,\n 255,\n ),\n torch.fake_quantize_per_tensor_affine(a, 0.1, 0, 0, 255),\n torch.float_power(torch.randint(10, (4,)), 2),\n torch.float_power(torch.arange(1, 5), torch.tensor([2, -3, 4, -5])),\n torch.floor(a),\n torch.floor(float(torch.tensor(1))),\n torch.floor_divide(torch.tensor([4.0, 3.0]), torch.tensor([2.0, 2.0])),\n torch.floor_divide(torch.tensor([4.0, 3.0]), 1.4),\n torch.fmod(torch.tensor([-3, -2, -1, 1, 2, 3]), 2),\n torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5),\n torch.frac(torch.tensor([1.0, 2.5, -3.2])),\n torch.randn(4, dtype=torch.cfloat).imag,\n torch.ldexp(torch.tensor([1.0]), torch.tensor([1])),\n torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])),\n torch.lerp(torch.arange(1.0, 5.0), torch.empty(4).fill_(10), 0.5),\n torch.lerp(\n torch.arange(1.0, 5.0),\n torch.empty(4).fill_(10),\n torch.full_like(torch.arange(1.0, 5.0), 0.5),\n ),\n 
torch.lgamma(torch.arange(0.5, 2, 0.5)),\n torch.log(torch.arange(5) + 10),\n torch.log10(torch.rand(5)),\n torch.log1p(torch.randn(5)),\n torch.log2(torch.rand(5)),\n torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),\n torch.logaddexp(\n torch.tensor([-100.0, -200.0, -300.0]), torch.tensor([-1, -2, -3])\n ),\n torch.logaddexp(\n torch.tensor([1.0, 2000.0, 30000.0]), torch.tensor([-1, -2, -3])\n ),\n torch.logaddexp2(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),\n torch.logaddexp2(\n torch.tensor([-100.0, -200.0, -300.0]), torch.tensor([-1, -2, -3])\n ),\n torch.logaddexp2(\n torch.tensor([1.0, 2000.0, 30000.0]), torch.tensor([-1, -2, -3])\n ),\n torch.logical_and(r, s),\n torch.logical_and(r.double(), s.double()),\n torch.logical_and(r.double(), s),\n torch.logical_and(r, s, out=torch.empty(4, dtype=torch.bool)),\n torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)),\n torch.logical_not(torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)),\n torch.logical_not(\n torch.tensor([0.0, 1.0, -10.0], dtype=torch.double),\n out=torch.empty(3, dtype=torch.int16),\n ),\n torch.logical_or(r, s),\n torch.logical_or(r.double(), s.double()),\n torch.logical_or(r.double(), s),\n torch.logical_or(r, s, out=torch.empty(4, dtype=torch.bool)),\n torch.logical_xor(r, s),\n torch.logical_xor(r.double(), s.double()),\n torch.logical_xor(r.double(), s),\n torch.logical_xor(r, s, out=torch.empty(4, dtype=torch.bool)),\n torch.logit(torch.rand(5), eps=1e-6),\n torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])),\n torch.i0(torch.arange(5, dtype=torch.float32)),\n torch.igamma(a, b),\n torch.igammac(a, b),\n torch.mul(torch.randn(3), 100),\n b.mul(a),\n b.mul(5),\n b.mul(a, out=b),\n b.mul_(a),\n b.mul_(5),\n torch.multiply(torch.randn(4, 1), torch.randn(1, 4)),\n torch.mvlgamma(torch.empty(2, 3).uniform_(1.0, 2.0), 2),\n torch.tensor([float(\"nan\"), float(\"inf\"), -float(\"inf\"), 3.14]),\n torch.nan_to_num(w),\n torch.nan_to_num_(w),\n torch.nan_to_num(w, nan=2.0),\n torch.nan_to_num(w, nan=2.0, posinf=1.0),\n torch.neg(torch.randn(5)),\n # torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps]),\n torch.polygamma(1, torch.tensor([1.0, 0.5])),\n torch.polygamma(2, torch.tensor([1.0, 0.5])),\n torch.polygamma(3, torch.tensor([1.0, 0.5])),\n torch.polygamma(4, torch.tensor([1.0, 0.5])),\n torch.pow(a, 2),\n torch.pow(2, float(torch.tensor(0.5))),\n torch.pow(torch.arange(1.0, 5.0), torch.arange(1.0, 5.0)),\n torch.rad2deg(\n torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])\n ),\n torch.randn(4, dtype=torch.cfloat).real,\n torch.reciprocal(a),\n torch.remainder(torch.tensor([-3.0, -2.0]), 2),\n torch.remainder(torch.tensor([1, 2, 3, 4, 5]), 1.5),\n torch.round(a),\n torch.round(torch.tensor(0.5).item()),\n torch.rsqrt(a),\n torch.sigmoid(a),\n torch.sign(torch.tensor([0.7, -1.2, 0.0, 2.3])),\n torch.sgn(a),\n torch.signbit(torch.tensor([0.7, -1.2, 0.0, 2.3])),\n torch.sin(a),\n torch.sinc(a),\n torch.sinh(a),\n torch.sqrt(a),\n torch.square(a),\n torch.sub(torch.tensor((1, 2)), torch.tensor((0, 1)), alpha=2),\n b.sub(a),\n b.sub_(a),\n b.sub(5),\n torch.sum(5),\n torch.tan(a),\n torch.tanh(a),\n torch.true_divide(a, a),\n torch.trunc(a),\n torch.trunc_(a),\n torch.xlogy(f, g),\n torch.xlogy(f, g),\n torch.xlogy(f, 4),\n torch.xlogy(2, g),\n )\n\n\nclass ReductionOpsModule(torch.nn.Module):\n def __init__(self):\n super(ReductionOpsModule, self).__init__()\n\n def forward(self):\n return self.reduction_ops()\n\n 
def reduction_ops(self):\n a = torch.randn(4)\n b = torch.randn(4)\n c = torch.tensor(0.5)\n return (\n torch.argmax(a),\n torch.argmin(a),\n torch.amax(a),\n torch.amin(a),\n torch.aminmax(a),\n torch.all(a),\n torch.any(a),\n torch.max(a),\n a.max(a),\n torch.max(a, 0),\n torch.min(a),\n a.min(a),\n torch.min(a, 0),\n torch.dist(a, b),\n torch.logsumexp(a, 0),\n torch.mean(a),\n torch.mean(a, 0),\n torch.nanmean(a),\n torch.median(a),\n torch.nanmedian(a),\n torch.mode(a),\n torch.norm(a),\n a.norm(2),\n torch.norm(a, dim=0),\n torch.norm(c, torch.tensor(2)),\n torch.nansum(a),\n torch.prod(a),\n torch.quantile(a, torch.tensor([0.25, 0.5, 0.75])),\n torch.quantile(a, 0.5),\n torch.nanquantile(a, torch.tensor([0.25, 0.5, 0.75])),\n torch.std(a),\n torch.std_mean(a),\n torch.sum(a),\n torch.unique(a),\n torch.unique_consecutive(a),\n torch.var(a),\n torch.var_mean(a),\n torch.count_nonzero(a),\n )\n\n\nclass ComparisonOpsModule(torch.nn.Module):\n def __init__(self):\n super(ComparisonOpsModule, self).__init__()\n\n def forward(self):\n a = torch.tensor(0)\n b = torch.tensor(1)\n return (\n torch.allclose(a, b),\n torch.argsort(a),\n torch.eq(a, b),\n torch.eq(a, 1),\n torch.equal(a, b),\n torch.ge(a, b),\n torch.ge(a, 1),\n torch.greater_equal(a, b),\n torch.greater_equal(a, 1),\n torch.gt(a, b),\n torch.gt(a, 1),\n torch.greater(a, b),\n torch.isclose(a, b),\n torch.isfinite(a),\n torch.isin(a, b),\n torch.isinf(a),\n torch.isposinf(a),\n torch.isneginf(a),\n torch.isnan(a),\n torch.isreal(a),\n torch.kthvalue(a, 1),\n torch.le(a, b),\n torch.le(a, 1),\n torch.less_equal(a, b),\n torch.lt(a, b),\n torch.lt(a, 1),\n torch.less(a, b),\n torch.maximum(a, b),\n torch.minimum(a, b),\n torch.fmax(a, b),\n torch.fmin(a, b),\n torch.ne(a, b),\n torch.ne(a, 1),\n torch.not_equal(a, b),\n torch.sort(a),\n torch.topk(a, 1),\n torch.msort(a),\n )\n\n\nclass OtherMathOpsModule(torch.nn.Module):\n def __init__(self):\n super(OtherMathOpsModule, self).__init__()\n\n def forward(self):\n return self.other_ops()\n\n def other_ops(self):\n a = torch.randn(4)\n b = torch.randn(4)\n c = torch.randint(0, 8, (5,), dtype=torch.int64)\n e = torch.randn(4, 3)\n f = torch.randn(4, 4, 4)\n size = [0, 1]\n dims = [0, 1]\n return (\n torch.atleast_1d(a),\n torch.atleast_2d(a),\n torch.atleast_3d(a),\n torch.bincount(c),\n torch.block_diag(a),\n torch.broadcast_tensors(a),\n torch.broadcast_to(a, (4)),\n # torch.broadcast_shapes(a),\n torch.bucketize(a, b),\n torch.cartesian_prod(a),\n torch.cdist(e, e),\n torch.clone(a),\n torch.combinations(a),\n torch.corrcoef(a),\n # torch.cov(a),\n torch.cross(e, e),\n torch.cummax(a, 0),\n torch.cummin(a, 0),\n torch.cumprod(a, 0),\n torch.cumsum(a, 0),\n torch.diag(a),\n torch.diag_embed(a),\n torch.diagflat(a),\n torch.diagonal(e),\n torch.diff(a),\n torch.einsum(\"iii\", f),\n torch.flatten(a),\n torch.flip(e, dims),\n torch.fliplr(e),\n torch.flipud(e),\n torch.kron(a, b),\n torch.rot90(e),\n torch.gcd(c, c),\n torch.histc(a),\n torch.histogram(a),\n torch.meshgrid(a),\n torch.meshgrid(a, indexing=\"xy\"),\n torch.lcm(c, c),\n torch.logcumsumexp(a, 0),\n torch.ravel(a),\n torch.renorm(e, 1, 0, 5),\n torch.repeat_interleave(c),\n torch.roll(a, 1, 0),\n torch.searchsorted(a, b),\n torch.tensordot(e, e),\n torch.trace(e),\n torch.tril(e),\n torch.tril_indices(3, 3),\n torch.triu(e),\n torch.triu_indices(3, 3),\n torch.vander(a),\n torch.view_as_real(torch.randn(4, dtype=torch.cfloat)),\n torch.view_as_complex(torch.randn(4, 2)),\n torch.resolve_conj(a),\n 
torch.resolve_neg(a),\n )\n\n\nclass SpectralOpsModule(torch.nn.Module):\n def __init__(self):\n super(SpectralOpsModule, self).__init__()\n\n def forward(self):\n return self.spectral_ops()\n\n def spectral_ops(self):\n a = torch.randn(10)\n b = torch.randn(10, 8, 4, 2)\n return (\n torch.stft(a, 8),\n torch.stft(a, torch.tensor(8)),\n torch.istft(b, 8),\n torch.bartlett_window(2, dtype=torch.float),\n torch.blackman_window(2, dtype=torch.float),\n torch.hamming_window(4, dtype=torch.float),\n torch.hann_window(4, dtype=torch.float),\n torch.kaiser_window(4, dtype=torch.float),\n )\n\n\nclass BlasLapackOpsModule(torch.nn.Module):\n def __init__(self):\n super(BlasLapackOpsModule, self).__init__()\n\n def forward(self):\n return self.blas_lapack_ops()\n\n def blas_lapack_ops(self):\n m = torch.randn(3, 3)\n a = torch.randn(10, 3, 4)\n b = torch.randn(10, 4, 3)\n v = torch.randn(3)\n return (\n torch.addbmm(m, a, b),\n torch.addmm(torch.randn(2, 3), torch.randn(2, 3), torch.randn(3, 3)),\n torch.addmv(torch.randn(2), torch.randn(2, 3), torch.randn(3)),\n torch.addr(torch.zeros(3, 3), v, v),\n torch.baddbmm(m, a, b),\n torch.bmm(a, b),\n torch.chain_matmul(torch.randn(3, 3), torch.randn(3, 3), torch.randn(3, 3)),\n # torch.cholesky(a), # deprecated\n # torch.cholesky_inverse(torch.randn(3, 3)), # had some error\n # torch.cholesky_solve(torch.randn(3, 3), torch.randn(3, 3)),\n torch.dot(v, v),\n # torch.linalg.eig(m), # not build with lapack\n # torch.geqrf(a),\n torch.ger(v, v),\n torch.inner(m, m),\n # torch.inverse(m),\n # torch.det(m),\n # torch.logdet(m),\n # torch.slogdet(m),\n # torch.lstsq(m, m),\n # torch.lu(m),\n # torch.lu_solve(m, *torch.lu(m)),\n # torch.lu_unpack(*torch.lu(m)),\n torch.matmul(m, m),\n torch.matrix_power(m, 2),\n # torch.matrix_rank(m),\n torch.matrix_exp(m),\n torch.mm(m, m),\n torch.mv(m, v),\n # torch.orgqr(a, m),\n # torch.ormqr(a, m, v),\n torch.outer(v, v),\n # torch.pinverse(m),\n # torch.qr(a),\n # torch.solve(m, m),\n # torch.svd(a),\n # torch.svd_lowrank(a),\n # torch.pca_lowrank(a),\n # torch.symeig(a), # deprecated\n # torch.lobpcg(a, b), # not supported\n torch.trapz(m, m),\n torch.trapezoid(m, m),\n torch.cumulative_trapezoid(m, m),\n # torch.triangular_solve(m, m),\n torch.vdot(v, v),\n )\n",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# https://pytorch.org/docs/stable/nn.html\nclass NNConvolutionModule(torch.nn.Module):\n def __init__(self):\n super(NNConvolutionModule, self).__init__()\n self.input1d = torch.randn(1, 4, 36)\n self.input2d = torch.randn(1, 4, 30, 10)\n self.input3d = torch.randn(1, 4, 10, 4, 4)\n self.module1d = nn.ModuleList(\n [\n nn.Conv1d(4, 33, 3),\n nn.ConvTranspose1d(4, 33, 3),\n nn.Fold(output_size=(5, 10), kernel_size=(2, 2)),\n ]\n )\n self.module2d = nn.ModuleList(\n [\n nn.Conv2d(4, 33, 3),\n nn.ConvTranspose2d(4, 33, 3),\n nn.Unfold(kernel_size=3),\n ]\n )\n self.module3d = nn.ModuleList(\n [\n nn.Conv3d(4, 33, 2),\n nn.ConvTranspose3d(4, 33, 3),\n ]\n )\n\n def forward(self):\n return (\n [module(self.input1d) for i, module in enumerate(self.module1d)],\n [module(self.input2d) for i, module in enumerate(self.module2d)],\n [module(self.input3d) for i, module in enumerate(self.module3d)],\n )\n\n\nclass NNPoolingModule(torch.nn.Module):\n def __init__(self):\n super(NNPoolingModule, self).__init__()\n self.input1d = torch.randn(1, 16, 50)\n self.module1d = nn.ModuleList(\n [\n nn.MaxPool1d(3, stride=2),\n nn.AvgPool1d(3, stride=2),\n nn.LPPool1d(2, 3, stride=2),\n nn.AdaptiveMaxPool1d(3),\n nn.AdaptiveAvgPool1d(3),\n ]\n )\n\n self.input2d = torch.randn(1, 16, 30, 10)\n self.module2d = nn.ModuleList(\n [\n nn.MaxPool2d((3, 2), stride=(2, 1)),\n nn.AvgPool2d((3, 2), stride=(2, 1)),\n nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5)),\n nn.LPPool2d(2, 3, stride=(2, 1)),\n nn.AdaptiveMaxPool2d((5, 7)),\n nn.AdaptiveAvgPool2d((7)),\n ]\n )\n\n self.input3d = torch.randn(1, 16, 20, 4, 4)\n self.module3d = nn.ModuleList(\n [\n nn.MaxPool3d(2),\n nn.AvgPool3d(2),\n nn.FractionalMaxPool3d(2, output_ratio=(0.5, 0.5, 0.5)),\n nn.AdaptiveMaxPool3d((5, 7, 9)),\n nn.AdaptiveAvgPool3d((5, 7, 9)),\n ]\n )\n # TODO max_unpool\n\n def forward(self):\n return (\n [module(self.input1d) for i, module in enumerate(self.module1d)],\n [module(self.input2d) for i, module in enumerate(self.module2d)],\n [module(self.input3d) for i, module in enumerate(self.module3d)],\n )\n\n\nclass NNPaddingModule(torch.nn.Module):\n def __init__(self):\n super(NNPaddingModule, self).__init__()\n self.input1d = torch.randn(1, 4, 50)\n self.module1d = nn.ModuleList(\n [\n nn.ReflectionPad1d(2),\n nn.ReplicationPad1d(2),\n nn.ConstantPad1d(2, 3.5),\n ]\n )\n\n self.input2d = torch.randn(1, 4, 30, 10)\n self.module2d = nn.ModuleList(\n [\n nn.ReflectionPad2d(2),\n nn.ReplicationPad2d(2),\n nn.ZeroPad2d(2),\n nn.ConstantPad2d(2, 3.5),\n ]\n )\n\n self.input3d = torch.randn(1, 4, 10, 4, 4)\n self.module3d = nn.ModuleList(\n [\n nn.ReflectionPad3d(1),\n nn.ReplicationPad3d(3),\n nn.ConstantPad3d(3, 3.5),\n ]\n )\n\n def forward(self):\n return (\n [module(self.input1d) for i, module in enumerate(self.module1d)],\n [module(self.input2d) for i, module in enumerate(self.module2d)],\n [module(self.input3d) for i, module in enumerate(self.module3d)],\n )\n\n\nclass NNNormalizationModule(torch.nn.Module):\n def __init__(self):\n super(NNNormalizationModule, self).__init__()\n self.input1d = torch.randn(1, 4, 50)\n self.module1d = nn.ModuleList(\n [\n nn.BatchNorm1d(4),\n nn.InstanceNorm1d(4),\n ]\n )\n\n self.input2d = torch.randn(1, 4, 30, 10)\n self.module2d = nn.ModuleList(\n [\n nn.BatchNorm2d(4),\n nn.GroupNorm(4, 4),\n nn.InstanceNorm2d(4),\n nn.LayerNorm([4, 30, 10]),\n nn.LocalResponseNorm(2),\n ]\n )\n\n self.input3d = torch.randn(1, 4, 10, 4, 4)\n self.module3d 
= nn.ModuleList(\n [\n nn.BatchNorm3d(4),\n nn.InstanceNorm3d(4),\n nn.ChannelShuffle(2),\n ]\n )\n\n def forward(self):\n return (\n [module(self.input1d) for i, module in enumerate(self.module1d)],\n [module(self.input2d) for i, module in enumerate(self.module2d)],\n [module(self.input3d) for i, module in enumerate(self.module3d)],\n )\n\n\nclass NNActivationModule(torch.nn.Module):\n def __init__(self):\n super(NNActivationModule, self).__init__()\n self.activations = nn.ModuleList(\n [\n nn.ELU(),\n nn.Hardshrink(),\n nn.Hardsigmoid(),\n nn.Hardtanh(),\n nn.Hardswish(),\n nn.LeakyReLU(),\n nn.LogSigmoid(),\n # nn.MultiheadAttention(),\n nn.PReLU(),\n nn.ReLU(),\n nn.ReLU6(),\n nn.RReLU(),\n nn.SELU(),\n nn.CELU(),\n nn.GELU(),\n nn.Sigmoid(),\n nn.SiLU(),\n nn.Mish(),\n nn.Softplus(),\n nn.Softshrink(),\n nn.Softsign(),\n nn.Tanh(),\n nn.Tanhshrink(),\n # nn.Threshold(0.1, 20),\n nn.GLU(),\n nn.Softmin(),\n nn.Softmax(),\n nn.Softmax2d(),\n nn.LogSoftmax(),\n # nn.AdaptiveLogSoftmaxWithLoss(),\n ]\n )\n\n def forward(self):\n input = torch.randn(2, 3, 4)\n for i, module in enumerate(self.activations):\n x = module(input)\n return x\n\n\nclass NNRecurrentModule(torch.nn.Module):\n def __init__(self):\n super(NNRecurrentModule, self).__init__()\n self.rnn = nn.ModuleList(\n [\n nn.RNN(4, 8, 2),\n nn.RNNCell(4, 8),\n ]\n )\n self.gru = nn.ModuleList([nn.GRU(4, 8, 2), nn.GRUCell(4, 8)])\n self.lstm = nn.ModuleList(\n [\n nn.LSTM(4, 8, 2),\n nn.LSTMCell(4, 8),\n ]\n )\n\n def forward(self):\n input = torch.randn(5, 3, 4)\n h = torch.randn(2, 3, 8)\n c = torch.randn(2, 3, 8)\n return (\n self.rnn[0](input, h),\n self.rnn[1](input[0], h[0]),\n self.gru[0](input, h),\n self.gru[1](input[0], h[0]),\n self.lstm[0](input, (h, c)),\n self.lstm[1](input[0], (h[0], c[0])),\n )\n\n\nclass NNTransformerModule(torch.nn.Module):\n def __init__(self):\n super(NNTransformerModule, self).__init__()\n self.transformers = nn.ModuleList(\n [\n nn.Transformer(\n d_model=2, nhead=2, num_encoder_layers=1, num_decoder_layers=1\n ),\n nn.TransformerEncoder(\n nn.TransformerEncoderLayer(d_model=2, nhead=2), num_layers=1\n ),\n nn.TransformerDecoder(\n nn.TransformerDecoderLayer(d_model=2, nhead=2), num_layers=1\n ),\n ]\n )\n\n def forward(self):\n input = torch.rand(1, 16, 2)\n tgt = torch.rand((1, 16, 2))\n return (\n self.transformers[0](input, tgt),\n self.transformers[1](input),\n self.transformers[2](input, tgt),\n )\n\n\nclass NNLinearModule(torch.nn.Module):\n def __init__(self):\n super(NNLinearModule, self).__init__()\n self.linears = nn.ModuleList(\n [\n nn.Identity(54),\n nn.Linear(20, 20),\n nn.Bilinear(20, 20, 40),\n # nn.LazyLinear(20, 30),\n ]\n )\n\n def forward(self):\n input = torch.randn(32, 20)\n return (\n self.linears[0](input),\n self.linears[1](input),\n self.linears[2](input, input),\n )\n\n\nclass NNDropoutModule(torch.nn.Module):\n def __init__(self):\n super(NNDropoutModule, self).__init__()\n\n def forward(self):\n a = torch.randn(8, 4)\n b = torch.randn(8, 4, 4, 4)\n c = torch.randn(8, 4, 4, 4, 4)\n return (\n F.dropout(a),\n F.dropout2d(b),\n F.dropout3d(c),\n F.alpha_dropout(a),\n F.feature_alpha_dropout(c),\n )\n\n\nclass NNSparseModule(torch.nn.Module):\n def __init__(self):\n super(NNSparseModule, self).__init__()\n\n def forward(self):\n input = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])\n input2 = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])\n embedding_matrix = torch.rand(10, 3)\n offsets = torch.tensor([0, 4])\n return (\n F.embedding(input, embedding_matrix),\n 
F.embedding_bag(input2, embedding_matrix, offsets),\n F.one_hot(torch.arange(0, 5) % 3, num_classes=5),\n )\n\n\nclass NNDistanceModule(torch.nn.Module):\n def __init__(self):\n super(NNDistanceModule, self).__init__()\n\n def forward(self):\n a = torch.randn(8, 4)\n b = torch.randn(8, 4)\n return (\n F.pairwise_distance(a, b),\n F.cosine_similarity(a, b),\n F.pdist(a),\n )\n\n\nclass NNLossFunctionModule(torch.nn.Module):\n def __init__(self):\n super(NNLossFunctionModule, self).__init__()\n self.x = torch.FloatTensor([[0.1, 0.2, 0.4, 0.8]])\n self.y = torch.LongTensor([[3, 0, -1, 1]])\n\n def forward(self):\n a = torch.randn(3, 2)\n b = torch.rand(3, 2)\n c = torch.rand(3)\n log_probs = torch.randn(50, 16, 20).log_softmax(2).detach()\n targets = torch.randint(1, 20, (16, 30), dtype=torch.long)\n input_lengths = torch.full((16,), 50, dtype=torch.long)\n target_lengths = torch.randint(10, 30, (16,), dtype=torch.long)\n return (\n F.binary_cross_entropy(torch.sigmoid(a), b),\n F.binary_cross_entropy_with_logits(torch.sigmoid(a), b),\n F.poisson_nll_loss(a, b),\n F.cosine_embedding_loss(a, b, c),\n F.cross_entropy(a, b),\n F.ctc_loss(log_probs, targets, input_lengths, target_lengths),\n # F.gaussian_nll_loss(a, b, torch.ones(5, 1)), # ENTER is not supported in mobile module\n F.hinge_embedding_loss(a, b),\n F.kl_div(a, b),\n F.l1_loss(a, b),\n F.mse_loss(a, b),\n F.margin_ranking_loss(c, c, c),\n F.multilabel_margin_loss(self.x, self.y),\n F.multilabel_soft_margin_loss(self.x, self.y),\n F.multi_margin_loss(self.x, torch.tensor([3])),\n F.nll_loss(a, torch.tensor([1, 0, 1])),\n F.huber_loss(a, b),\n F.smooth_l1_loss(a, b),\n F.soft_margin_loss(a, b),\n F.triplet_margin_loss(a, b, -b),\n # F.triplet_margin_with_distance_loss(a, b, -b), # can't take variable number of arguments\n )\n\n\nclass NNVisionModule(torch.nn.Module):\n def __init__(self):\n super(NNVisionModule, self).__init__()\n self.input = torch.randn(1, 4, 9, 9)\n self.vision_modules = nn.ModuleList(\n [\n nn.PixelShuffle(2),\n nn.PixelUnshuffle(3),\n nn.Upsample(scale_factor=2, mode=\"nearest\"),\n nn.Upsample(scale_factor=2, mode=\"bilinear\"),\n nn.Upsample(scale_factor=2, mode=\"bicubic\"),\n nn.UpsamplingNearest2d(scale_factor=2),\n nn.UpsamplingBilinear2d(scale_factor=2),\n ]\n )\n self.linear_sample = nn.Upsample(scale_factor=2, mode=\"linear\")\n self.trilinear_sample = nn.Upsample(scale_factor=2, mode=\"trilinear\")\n\n def forward(self):\n input = torch.randn(1, 3, 16, 16)\n return (\n [module(self.input) for i, module in enumerate(self.vision_modules)],\n self.linear_sample(torch.randn(4, 9, 9)),\n self.trilinear_sample(torch.randn(1, 3, 4, 9, 9)),\n F.grid_sample(input, torch.ones(1, 4, 4, 2)),\n )\n\n\nclass NNShuffleModule(torch.nn.Module):\n def __init__(self):\n super(NNShuffleModule, self).__init__()\n self.shuffle = nn.ChannelShuffle(2)\n\n def forward(self):\n return (self.shuffle(torch.randn(1, 4, 2, 2)),)\n\n\nclass NNUtilsModule(torch.nn.Module):\n def __init__(self):\n super(NNUtilsModule, self).__init__()\n self.flatten = nn.Sequential(\n nn.Linear(50, 50),\n nn.Unflatten(1, (2, 5, 5))\n )\n\n def forward(self):\n input = torch.randn(2, 50)\n return (\n self.flatten(input),\n )\n"
] | [
[
"torch.jit.trace",
"torch.testing._internal.jit_utils.disable_autodiff_subgraph_inlining",
"torch.set_default_dtype",
"torch.testing._internal.common_utils.first_sample",
"torch.testing._internal.common_utils.clone_input_helper",
"torch.testing._internal.jit_metaprogramming_utils.create_traced_fn",
"torch.testing.FileCheck",
"torch.testing._internal.jit_metaprogramming_utils.create_script_fn",
"torch.can_cast",
"torch.device",
"torch.jit.CompilationUnit",
"torch.testing._internal.common_utils.run_tests",
"torch.testing._internal.jit_utils.is_lambda"
],
[
"torch.all",
"torch.arctan",
"torch.randint",
"torch.sinc",
"torch.max",
"torch.zeros",
"torch.sin",
"torch.ge",
"torch.greater_equal",
"torch.combinations",
"torch.histc",
"torch.clip",
"torch.acos",
"torch.rsqrt",
"torch.lcm",
"torch.cumprod",
"torch.topk",
"torch.conj",
"torch.fmax",
"torch.pow",
"torch.isclose",
"torch.outer",
"torch.isreal",
"torch.flipud",
"torch.sqrt",
"torch.randn",
"torch.median",
"torch.fliplr",
"torch.tril",
"torch.equal",
"torch.logical_xor",
"torch.square",
"torch.bmm",
"torch.kthvalue",
"torch.var_mean",
"torch.arcsinh",
"torch.matrix_exp",
"torch.min",
"torch.msort",
"torch.minimum",
"torch.tril_indices",
"torch.amin",
"torch.resolve_neg",
"torch.broadcast_to",
"torch.cross",
"torch.ravel",
"torch.atan2",
"torch.count_nonzero",
"torch.tan",
"torch.asinh",
"torch.any",
"torch.arccos",
"torch.broadcast_tensors",
"torch.gt",
"torch.trapezoid",
"torch.bincount",
"torch.bitwise_not",
"torch.isneginf",
"torch.diag_embed",
"torch.sum",
"torch.lt",
"torch.le",
"torch.repeat_interleave",
"torch.kaiser_window",
"torch.logical_and",
"torch.norm",
"torch.igamma",
"torch.einsum",
"torch.clone",
"torch.histogram",
"torch.argmin",
"torch.tensor",
"torch.divide",
"torch.fake_quantize_per_tensor_affine",
"torch.isposinf",
"torch.reciprocal",
"torch.sort",
"torch.cartesian_prod",
"torch.rand",
"torch.argsort",
"torch.vdot",
"torch.igammac",
"torch.atan",
"torch.dot",
"torch.tensordot",
"torch.mv",
"torch.isinf",
"torch.floor",
"torch.vander",
"torch.std_mean",
"torch.trunc",
"torch.block_diag",
"torch.diag",
"torch.addbmm",
"torch.atleast_2d",
"torch.corrcoef",
"torch.istft",
"torch.nan_to_num",
"torch.nansum",
"torch.trapz",
"torch.quantile",
"torch.matmul",
"torch.meshgrid",
"torch.hann_window",
"torch.maximum",
"torch.stft",
"torch.ger",
"torch.bartlett_window",
"torch.renorm",
"torch.inner",
"torch.matrix_power",
"torch.unique",
"torch.cummax",
"torch.triu",
"torch.trace",
"torch.mm",
"torch.add",
"torch.cummin",
"torch.round",
"torch.isin",
"torch.rot90",
"torch.sgn",
"torch.isfinite",
"torch.std",
"torch.atleast_1d",
"torch.cumulative_trapezoid",
"torch.arange",
"torch.greater",
"torch.trunc_",
"torch.roll",
"torch.cos",
"torch.angle",
"torch.atleast_3d",
"torch.logical_or",
"torch.cosh",
"torch.ne",
"torch.ceil",
"torch.diagonal",
"torch.hamming_window",
"torch.mode",
"torch.true_divide",
"torch.resolve_conj",
"torch.dist",
"torch.less_equal",
"torch.cumsum",
"torch.aminmax",
"torch.sinh",
"torch.blackman_window",
"torch.arcsin",
"torch.mean",
"torch.nanmean",
"torch.tanh",
"torch.cdist",
"torch.flatten",
"torch.allclose",
"torch.searchsorted",
"torch.logsumexp",
"torch.amax",
"torch.eq",
"torch.nanmedian",
"torch.asin",
"torch.unique_consecutive",
"torch.kron",
"torch.triu_indices",
"torch.prod",
"torch.div",
"torch.nan_to_num_",
"torch.sigmoid",
"torch.fmin",
"torch.empty",
"torch.less",
"torch.xlogy",
"torch.copysign",
"torch.diagflat",
"torch.flip",
"torch.logcumsumexp",
"torch.diff",
"torch.baddbmm",
"torch.gcd",
"torch.isnan",
"torch.not_equal",
"torch.var",
"torch.clamp",
"torch.bucketize",
"torch.argmax"
],
[
"torch.nn.Hardshrink",
"torch.nn.functional.poisson_nll_loss",
"torch.randint",
"torch.nn.functional.multilabel_soft_margin_loss",
"torch.nn.LPPool2d",
"torch.nn.functional.dropout",
"torch.nn.GRU",
"torch.nn.PixelUnshuffle",
"torch.randn",
"torch.nn.Softsign",
"torch.nn.functional.soft_margin_loss",
"torch.nn.SELU",
"torch.nn.GroupNorm",
"torch.nn.UpsamplingNearest2d",
"torch.nn.Tanhshrink",
"torch.full",
"torch.nn.functional.triplet_margin_loss",
"torch.nn.ReplicationPad1d",
"torch.nn.Conv2d",
"torch.nn.ReflectionPad3d",
"torch.nn.functional.mse_loss",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.InstanceNorm2d",
"torch.nn.Bilinear",
"torch.nn.BatchNorm2d",
"torch.nn.RNNCell",
"torch.nn.InstanceNorm3d",
"torch.nn.Mish",
"torch.nn.ReflectionPad2d",
"torch.nn.ConvTranspose3d",
"torch.nn.functional.cross_entropy",
"torch.nn.LPPool1d",
"torch.nn.Unfold",
"torch.nn.GLU",
"torch.nn.functional.l1_loss",
"torch.nn.Fold",
"torch.nn.Hardswish",
"torch.nn.RNN",
"torch.FloatTensor",
"torch.nn.ReplicationPad2d",
"torch.nn.TransformerDecoderLayer",
"torch.nn.ConstantPad2d",
"torch.nn.functional.feature_alpha_dropout",
"torch.tensor",
"torch.nn.Sigmoid",
"torch.nn.LSTMCell",
"torch.nn.functional.cosine_embedding_loss",
"torch.nn.LogSigmoid",
"torch.rand",
"torch.LongTensor",
"torch.nn.AdaptiveMaxPool1d",
"torch.nn.LogSoftmax",
"torch.nn.functional.pairwise_distance",
"torch.nn.functional.ctc_loss",
"torch.nn.ReflectionPad1d",
"torch.nn.LSTM",
"torch.nn.Softshrink",
"torch.nn.RReLU",
"torch.nn.Tanh",
"torch.nn.Upsample",
"torch.nn.ReLU",
"torch.nn.BatchNorm3d",
"torch.nn.AvgPool1d",
"torch.nn.functional.dropout2d",
"torch.nn.functional.hinge_embedding_loss",
"torch.nn.functional.margin_ranking_loss",
"torch.nn.LocalResponseNorm",
"torch.nn.ELU",
"torch.nn.functional.alpha_dropout",
"torch.nn.AvgPool3d",
"torch.nn.AdaptiveMaxPool2d",
"torch.nn.Softplus",
"torch.ones",
"torch.nn.InstanceNorm1d",
"torch.nn.MaxPool1d",
"torch.nn.FractionalMaxPool2d",
"torch.nn.CELU",
"torch.arange",
"torch.nn.ConvTranspose2d",
"torch.nn.PixelShuffle",
"torch.nn.UpsamplingBilinear2d",
"torch.nn.TransformerEncoderLayer",
"torch.nn.Conv3d",
"torch.nn.functional.cosine_similarity",
"torch.nn.LeakyReLU",
"torch.nn.Softmin",
"torch.nn.Hardsigmoid",
"torch.nn.Unflatten",
"torch.nn.SiLU",
"torch.nn.functional.embedding",
"torch.nn.GELU",
"torch.nn.functional.dropout3d",
"torch.nn.functional.multilabel_margin_loss",
"torch.nn.AdaptiveMaxPool3d",
"torch.nn.AdaptiveAvgPool3d",
"torch.nn.Identity",
"torch.nn.functional.kl_div",
"torch.nn.ConstantPad1d",
"torch.nn.Softmax",
"torch.nn.FractionalMaxPool3d",
"torch.nn.ReplicationPad3d",
"torch.nn.Transformer",
"torch.nn.functional.huber_loss",
"torch.nn.functional.smooth_l1_loss",
"torch.nn.functional.embedding_bag",
"torch.nn.GRUCell",
"torch.nn.ZeroPad2d",
"torch.nn.ConstantPad3d",
"torch.nn.BatchNorm1d",
"torch.sigmoid",
"torch.nn.ReLU6",
"torch.nn.PReLU",
"torch.nn.Softmax2d",
"torch.nn.Conv1d",
"torch.nn.AdaptiveAvgPool1d",
"torch.nn.functional.pdist",
"torch.nn.Hardtanh",
"torch.nn.LayerNorm",
"torch.nn.MaxPool2d",
"torch.nn.MaxPool3d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ConvTranspose1d",
"torch.nn.ChannelShuffle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AlexPC23/Python | [
"77689d74c5444faa1aa253a122602307e52ac581"
] | [
"Spyder/Ejercicios/Comparacion medias.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 6 19:54:52 2021\n\n@author: Alex\n\"\"\"\n\nimport os #sistema operativo\nimport pandas as pd #gestionar datframes\nimport numpy as np #numeric python (vectores, matrices,...)\nimport matplotlib.pyplot as plt #graficos\nimport scipy.stats as stats #Tests estadisticos\nimport seaborn as sns #Graficos pro\n\nos.chdir('C:/Programacion Estadistica PEP/ejercicio comparacion medias')\nos.getcwd()\nwbr = pd.read_csv('USA_cars_datasets.csv', sep=',', decimal='.')\n#Media del precio de los coches\nwbr.price.describe()\nplt.hist(wbr.price)\nplt.xlabel('Price')\nplt.ylabel('Frequency')\nprops = dict(boxstyle= 'round', facecolor='white', lw=0.5)\nplt.text(55000,550,'Mean:18767.67''\\n''n:2499' '\\n' 'std:12116.09', bbox=props)\nplt.title('Number of cars sold by price ''\\n')\nplt.show()\n\n\nwbr.mileage.describe()\n#Kilometraje coches\nwbr.loc [(wbr['mileage']<50000), \"mileage_cat2\"] = \"1: Poco kilometraje\"\nwbr.loc [(wbr['mileage']>=50000) & (wbr['mileage']<150000), \"mileage_cat2\"] = \"2: Kilometraje normal\"\nwbr.loc [(wbr['mileage']>=150000), \"mileage_cat2\"] = \"3: Alto kilometraje\"\n\nmytable = pd.crosstab(index=wbr[\"mileage_cat2\"], columns=\"count\")\nn=mytable.sum()\nmytable2 = (mytable/n)*100\nplt.bar(mytable2.index, mytable2['count'])\n\n#2º Hacer el test\n#Comparacion descriptiva:\nwbr.groupby('mileage_cat2').price.mean()\n\n#Comparacion estadistica:\n#Extraer las muestras y guardalas en objetos:\nprice_pocoskm=wbr.loc [(wbr['mileage']<50000), \"price\"] \nprice_normalkm=wbr.loc [(wbr['mileage']>=50000) & (wbr['mileage']<150000), \"price\"] \nprice_muchoskm=wbr.loc [(wbr['mileage']>=150000), \"price\"]\n\n#Hacer F DE FISHER de las medias para comparar\nres = stats.f_oneway(price_pocoskm, price_normalkm, price_muchoskm)\n#pvalue= 5.077309184346995e-110\nprint(res)\nprint('F:', round(res[0],3), 'PValue:', round(res[1],3))\n\n\n#COMPARACION GRAFICA: intervalos de confianza para las medias\n\nplt.figure(figsize=(7,5))\nax = sns.pointplot(x=\"mileage_cat2\", y=\"price\", data=wbr, capsize=0.05, ci=95, join=0)\nax.set_ylabel('')\n\nplt.axhline(y=wbr.price.mean(), linewidth=1, linestyle= 'dashed', color=\"green\")\nprops = dict(boxstyle='round', facecolor='white', lw=0.5)\nplt.text(1.5, 5000, 'Mean:18767.67''\\n''n:2499' '\\n' 'F: 278.83''\\n' 'Pval.: 0.000', bbox=props)\nplt.xlabel('Kilometraje')\nplt.title('Average rentals by mileage''\\n')\n\n\n\n"
] | [
[
"scipy.stats.f_oneway",
"pandas.crosstab",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.text",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
happog/PaddleOCR | [
"5ed1e2427b4e1759f0e9278f453e8d497db33b59"
] | [
"deploy/pdserving/ocr_local_server.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom paddle_serving_client import Client\nfrom paddle_serving_app.reader import OCRReader\nimport cv2\nimport sys\nimport numpy as np\nimport os\nfrom paddle_serving_client import Client\nfrom paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor\nfrom paddle_serving_app.reader import Div, Normalize, Transpose\nfrom paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes\nif sys.argv[1] == 'gpu':\n from paddle_serving_server_gpu.web_service import WebService\nelif sys.argv[1] == 'cpu':\n from paddle_serving_server.web_service import WebService\nfrom paddle_serving_app.local_predict import Debugger\nimport time\nimport re\nimport base64\n\n\nclass OCRService(WebService):\n def init_det_debugger(self, det_model_config):\n self.det_preprocess = Sequential([\n ResizeByFactor(32, 960), Div(255),\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(\n (2, 0, 1))\n ])\n self.det_client = Debugger()\n if sys.argv[1] == 'gpu':\n self.det_client.load_model_config(\n det_model_config, gpu=True, profile=False)\n elif sys.argv[1] == 'cpu':\n self.det_client.load_model_config(\n det_model_config, gpu=False, profile=False)\n self.ocr_reader = OCRReader()\n\n def preprocess(self, feed=[], fetch=[]):\n data = base64.b64decode(feed[0][\"image\"].encode('utf8'))\n data = np.fromstring(data, np.uint8)\n im = cv2.imdecode(data, cv2.IMREAD_COLOR)\n ori_h, ori_w, _ = im.shape\n det_img = self.det_preprocess(im)\n _, new_h, new_w = det_img.shape\n det_img = det_img[np.newaxis, :]\n det_img = det_img.copy()\n det_out = self.det_client.predict(\n feed={\"image\": det_img}, fetch=[\"concat_1.tmp_0\"])\n filter_func = FilterBoxes(10, 10)\n post_func = DBPostProcess({\n \"thresh\": 0.3,\n \"box_thresh\": 0.5,\n \"max_candidates\": 1000,\n \"unclip_ratio\": 1.5,\n \"min_size\": 3\n })\n sorted_boxes = SortedBoxes()\n ratio_list = [float(new_h) / ori_h, float(new_w) / ori_w]\n dt_boxes_list = post_func(det_out[\"concat_1.tmp_0\"], [ratio_list])\n dt_boxes = filter_func(dt_boxes_list[0], [ori_h, ori_w])\n dt_boxes = sorted_boxes(dt_boxes)\n get_rotate_crop_image = GetRotateCropImage()\n img_list = []\n max_wh_ratio = 0\n for i, dtbox in enumerate(dt_boxes):\n boximg = get_rotate_crop_image(im, dt_boxes[i])\n img_list.append(boximg)\n h, w = boximg.shape[0:2]\n wh_ratio = w * 1.0 / h\n max_wh_ratio = max(max_wh_ratio, wh_ratio)\n if len(img_list) == 0:\n return [], []\n _, w, h = self.ocr_reader.resize_norm_img(img_list[0],\n max_wh_ratio).shape\n imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')\n for id, img in enumerate(img_list):\n norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)\n imgs[id] = norm_img\n feed = {\"image\": imgs.copy()}\n fetch = [\"ctc_greedy_decoder_0.tmp_0\", \"softmax_0.tmp_0\"]\n return feed, fetch\n\n def postprocess(self, feed={}, fetch=[], fetch_map=None):\n rec_res = 
self.ocr_reader.postprocess(fetch_map, with_score=True)\n res_lst = []\n for res in rec_res:\n res_lst.append(res[0])\n res = {\"res\": res_lst}\n return res\n\n\nocr_service = OCRService(name=\"ocr\")\nocr_service.load_model_config(\"ocr_rec_model\")\nocr_service.prepare_server(workdir=\"workdir\", port=9292)\nocr_service.init_det_debugger(det_model_config=\"ocr_det_model\")\nif sys.argv[1] == 'gpu':\n ocr_service.run_debugger_service(gpu=True)\nelif sys.argv[1] == 'cpu':\n ocr_service.run_debugger_service()\nocr_service.run_web_service()\n"
] | [
[
"numpy.fromstring"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PMARINA/COS-429 | [
"25134e77101279c3f9f16a6738beb6170ba1fd09"
] | [
"Assignment 0/Part 2 - Getting Familiar with Python/SettingPixels.py"
] | [
"import numpy as np\nimport cv2\nimport os\nwindow_title = \"The Input Image\"\ninput_image = \"input.jpg\"\noutput_image = os.path.basename(__file__)[:-len(\".py\")] + \".jpg\"\nHORIZONTAL = 0\nVERTICAL = 1\n\ndef read_image(file_name = input_image):\n img = cv2.imread(file_name)\n return img\n\ndef display_image(img,window_title = window_title):\n cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)\n cv2.imshow(window_title,img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n return\n\ndef grayscale(img):\n grayscale = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n #=6, BGR and not RGB because of how cv2 returns images\n return grayscale\n\ndef save_to_disk(img,filename=output_image):\n cv2.imwrite(filename,img)\n\ndef get_dimensions_hw(img):\n return img.shape[0:2]\n\ndef get_middle_pixels_hw(img, new_height, new_width):\n input_img_h,input_img_w = get_dimensions_hw(img)\n if new_height > input_img_h:\n raise ValueError(\"Requested new height (\" + str(new_height) + \") is greater than image height (\" + str(input_img_h) + \").\")\n if new_width > input_img_w:\n raise ValueError(\"Requested new width (\" + str(new_width) + \") is greater than image width (\" + str(input_img_w) + \").\")\n middle_h = round(input_img_h/2)\n half_new_height = round(new_height/2)\n middle_w = round(input_img_w/2)\n half_new_width = round(new_width/2)\n middle_pixels = img[middle_h-half_new_height:middle_h+half_new_height,middle_w-half_new_width:middle_w+half_new_width]\n return middle_pixels\n\ndef set_periodic_pixel(img, frequency, direction, new_pixel):\n h,w = get_dimensions_hw(img)\n img = np.array(img,copy=True)\n if direction == HORIZONTAL:\n for i in range(0,h):\n for j in range(0,w,frequency):\n img[i][j] = new_pixel\n elif direction == VERTICAL:\n for i in range(0,h,frequency):\n for j in range(0,w):\n img[i][j] = new_pixel\n return img \n \n\nif __name__ == \"__main__\":\n img = read_image()\n revised = set_periodic_pixel(img,10,HORIZONTAL,0)\n revised = set_periodic_pixel(revised, 20, VERTICAL, 0)\n save_to_disk(revised)\n display_image(revised)\n\n#Note: Owing to the large input image used for this example, the program will not show all\n#lines unless you zoom in on the saved file (unless your monitor happens to have enough\n#resolution...)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
raymondEhlers/uproot4 | [
"b266614eb3e24d02fa5ed2be4a2d95ab71a5e499"
] | [
"tests/test_0017-multi-basket-multi-branch-fetch.py"
] | [
"# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/master/LICENSE\n\nfrom __future__ import absolute_import\n\nimport sys\nimport json\n\ntry:\n from io import StringIO\nexcept ImportError:\n from StringIO import StringIO\n\nimport numpy\nimport pytest\nimport skhep_testdata\n\nimport uproot4\nimport uproot4.interpretation.numerical\nimport uproot4.interpretation.library\nimport uproot4.source.futures\n\n\ndef test_any_basket():\n interpretation = uproot4.interpretation.numerical.AsDtype(\">i4\")\n\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\")\n )[\"sample/i4\"] as branch:\n assert branch.basket(0).array(interpretation).tolist() == [\n -15,\n -14,\n -13,\n -12,\n -11,\n -10,\n -9,\n ]\n assert branch.basket(1).array(interpretation).tolist() == [\n -8,\n -7,\n -6,\n -5,\n -4,\n -3,\n -2,\n ]\n assert branch.basket(2).array(interpretation).tolist() == [\n -1,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n ]\n assert branch.basket(3).array(interpretation).tolist() == [\n 6,\n 7,\n 8,\n 9,\n 10,\n 11,\n 12,\n ]\n assert branch.basket(4).array(interpretation).tolist() == [\n 13,\n 14,\n ]\n\n\ndef test_stitching_arrays():\n interpretation = uproot4.interpretation.numerical.AsDtype(\"i8\")\n expectation = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]\n basket_arrays = [[0, 1, 2, 3, 4], [5, 6], [], [7, 8, 9], [10], [11, 12, 13, 14]]\n basket_arrays = [numpy.array(x) for x in basket_arrays]\n entry_offsets = numpy.array([0, 5, 7, 7, 10, 11, 15])\n library = uproot4.interpretation.library._libraries[\"np\"]\n\n for start in range(16):\n for stop in range(15, -1, -1):\n actual = interpretation.final_array(\n basket_arrays, start, stop, entry_offsets, library, None\n )\n assert expectation[start:stop] == actual.tolist()\n\n\ndef _names_entries_to_ranges_or_baskets(self, branch_names, entry_start, entry_stop):\n out = []\n for name in branch_names:\n branch = self[name]\n for basket_num, range_or_basket in branch.entries_to_ranges_or_baskets(\n entry_start, entry_stop\n ):\n out.append((branch, basket_num, range_or_basket))\n return out\n\n\ndef test_names_entries_to_ranges_or_baskets():\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\")\n )[\"sample\"] as sample:\n out = _names_entries_to_ranges_or_baskets(sample, [\"i4\"], 0, 30)\n assert [x[1] for x in out] == [0, 1, 2, 3, 4]\n assert [x[2] for x in out] == [\n (6992, 7091),\n (16085, 16184),\n (25939, 26038),\n (35042, 35141),\n (40396, 40475),\n ]\n\n\ndef test_ranges_or_baskets_to_arrays():\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\")\n )[\"sample\"] as sample:\n branch = sample[\"i4\"]\n\n ranges_or_baskets = _names_entries_to_ranges_or_baskets(sample, [\"i4\"], 0, 30)\n branchid_interpretation = {\n branch.cache_key: uproot4.interpretation.numerical.AsDtype(\">i4\")\n }\n entry_start, entry_stop = (0, 30)\n decompression_executor = uproot4.source.futures.TrivialExecutor()\n interpretation_executor = uproot4.source.futures.TrivialExecutor()\n library = uproot4.interpretation.library._libraries[\"np\"]\n\n arrays = {}\n uproot4.behaviors.TBranch._ranges_or_baskets_to_arrays(\n sample,\n ranges_or_baskets,\n branchid_interpretation,\n entry_start,\n entry_stop,\n decompression_executor,\n interpretation_executor,\n library,\n arrays,\n )\n assert arrays[branch.cache_key].tolist() == [\n -15,\n -14,\n -13,\n -12,\n -11,\n -10,\n -9,\n -8,\n -7,\n -6,\n -5,\n -4,\n -3,\n -2,\n -1,\n 0,\n 1,\n 2,\n 3,\n 
4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 11,\n 12,\n 13,\n 14,\n ]\n\n\[email protected](\n \"file_handler\",\n [uproot4.source.file.MultithreadedFileSource, uproot4.source.file.MemmapSource],\n)\ndef test_branch_array_1(file_handler):\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\"),\n file_handler=file_handler,\n )[\"sample/i4\"] as branch:\n assert branch.array(\n uproot4.interpretation.numerical.AsDtype(\">i4\"), library=\"np\"\n ).tolist() == [\n -15,\n -14,\n -13,\n -12,\n -11,\n -10,\n -9,\n -8,\n -7,\n -6,\n -5,\n -4,\n -3,\n -2,\n -1,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 11,\n 12,\n 13,\n 14,\n ]\n\n\[email protected](\n \"file_handler\",\n [uproot4.source.file.MultithreadedFileSource, uproot4.source.file.MemmapSource],\n)\ndef test_branch_array_2(file_handler):\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\"),\n file_handler=file_handler,\n )[\"sample/i4\"] as branch:\n assert branch.array(\n uproot4.interpretation.numerical.AsDtype(\">i4\"),\n entry_start=3,\n entry_stop=-5,\n library=\"np\",\n ).tolist() == [\n -12,\n -11,\n -10,\n -9,\n -8,\n -7,\n -6,\n -5,\n -4,\n -3,\n -2,\n -1,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n ]\n\n\[email protected](\n \"file_handler\",\n [uproot4.source.file.MultithreadedFileSource, uproot4.source.file.MemmapSource],\n)\ndef test_branch_array_3(file_handler):\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\"),\n file_handler=file_handler,\n )[\"sample/i4\"] as branch:\n assert branch.array(\n uproot4.interpretation.numerical.AsDtype(\">i4\"),\n entry_start=3,\n entry_stop=-5,\n interpretation_executor=uproot4.decompression_executor,\n library=\"np\",\n ).tolist() == [\n -12,\n -11,\n -10,\n -9,\n -8,\n -7,\n -6,\n -5,\n -4,\n -3,\n -2,\n -1,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n ]\n\n\[email protected](\n \"file_handler\",\n [uproot4.source.file.MultithreadedFileSource, uproot4.source.file.MemmapSource],\n)\ndef test_branch_array_4(file_handler):\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\"),\n file_handler=file_handler,\n )[\"sample/i4\"] as branch:\n with pytest.raises(ValueError):\n branch.array(uproot4.interpretation.numerical.AsDtype(\">i8\"), library=\"np\")\n\n\ndef test_cache():\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\"),\n object_cache=100,\n array_cache=\"100 MB\",\n ) as f:\n assert f.cache_key == \"db4be408-93ad-11ea-9027-d201a8c0beef:/\"\n assert f[\"sample\"].cache_key == \"db4be408-93ad-11ea-9027-d201a8c0beef:/sample;1\"\n assert (\n f[\"sample/i4\"].cache_key\n == \"db4be408-93ad-11ea-9027-d201a8c0beef:/sample;1:i4(16)\"\n )\n i4 = f[\"sample/i4\"]\n assert list(f.file.array_cache) == []\n i4.array(uproot4.interpretation.numerical.AsDtype(\">i4\"), library=\"np\")\n assert list(f.file.array_cache) == [\n \"db4be408-93ad-11ea-9027-d201a8c0beef:/sample;1:i4(16):AsDtype(Bi4(),Li4()):0-30:np\"\n ]\n\n with pytest.raises(OSError):\n i4.array(\n uproot4.interpretation.numerical.AsDtype(\">i4\"), entry_start=3, library=\"np\"\n )\n\n i4.array(uproot4.interpretation.numerical.AsDtype(\">i4\"), library=\"np\")\n\n\ndef test_pandas():\n pandas = pytest.importorskip(\"pandas\")\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\")\n )[\"sample/i4\"] as branch:\n series = branch.array(\n uproot4.interpretation.numerical.AsDtype(\">i4\"),\n 
entry_start=3,\n entry_stop=-5,\n interpretation_executor=uproot4.decompression_executor,\n library=\"pd\",\n )\n assert isinstance(series, pandas.Series)\n assert series.values.tolist() == [\n -12,\n -11,\n -10,\n -9,\n -8,\n -7,\n -6,\n -5,\n -4,\n -3,\n -2,\n -1,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n ]\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
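The `test_stitching_arrays` case in the uproot4 test file above is the one piece that can be reasoned about without a ROOT file: `entry_offsets[i]` is the global entry index at which basket `i` begins, so building the final array for `[start, stop)` amounts to concatenating the per-basket arrays and slicing. A minimal plain-numpy sketch of that idea, reusing the test's own fixture data (the helper name is mine, not uproot4's API):

```python
import numpy as np

def stitch_baskets(basket_arrays, entry_offsets, start, stop):
    # entry_offsets[i] is the global entry index where basket i begins;
    # entry_offsets[-1] is the total number of entries across all baskets.
    total = np.concatenate(basket_arrays)
    assert len(total) == entry_offsets[-1]
    return total[start:stop]

baskets = [np.array(x) for x in [[0, 1, 2, 3, 4], [5, 6], [], [7, 8, 9], [10], [11, 12, 13, 14]]]
offsets = np.array([0, 5, 7, 7, 10, 11, 15])
assert stitch_baskets(baskets, offsets, 3, 12).tolist() == [3, 4, 5, 6, 7, 8, 9, 10, 11]
```

The test sweeps every `(start, stop)` pair over the same fixture to check exactly this invariant against the library's `final_array`.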
sakshamji/FacemaskDetection | [
"b274285ebaef51c110fab3dc608a2c2ef956ec95"
] | [
"facemask.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 22 15:56:47 2020\n\n@author: Saksham\n\"\"\"\n\n\nimport numpy as np\nimport keras\nimport keras.backend as k\nfrom keras.layers import Conv2D,MaxPooling2D,SpatialDropout2D,Flatten,Dropout,Dense\nfrom keras.models import Sequential,load_model\nfrom keras.optimizers import adam\nfrom keras.preprocessing import image\nimport cv2\nimport datetime\n\n\n# UNCOMMENT THE FOLLOWING CODE TO TRAIN THE CNN FROM SCRATCH\n\n# BUILDING MODEL TO CLASSIFY BETWEEN MASK AND NO MASK\n\nmodel=Sequential()\nmodel.add(Conv2D(32,(3,3),activation='relu',input_shape=(150,150,3)))\nmodel.add(MaxPooling2D() )\nmodel.add(Conv2D(32,(3,3),activation='relu'))\nmodel.add(MaxPooling2D() )\nmodel.add(Conv2D(32,(3,3),activation='relu'))\nmodel.add(MaxPooling2D() )\nmodel.add(Flatten())\nmodel.add(Dense(100,activation='relu'))\nmodel.add(Dense(1,activation='sigmoid'))\n\nmodel.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])\n\nfrom keras.preprocessing.image import ImageDataGenerator\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntraining_set = train_datagen.flow_from_directory(\n 'train',\n target_size=(150,150),\n batch_size=16 ,\n class_mode='binary')\n\ntest_set = test_datagen.flow_from_directory(\n 'test',\n target_size=(150,150),\n batch_size=16,\n class_mode='binary')\n\nmodel_saved=model.fit_generator(\n training_set,\n epochs=10,\n validation_data=test_set,\n\n )\n\nmodel.save('mymodel.h5',model_saved)\n\n#To test for individual images\n\nmymodel=load_model('mymodel.h5')\n#test_image=image.load_img('C:/Users/saksham/Desktop/ML Datasets/Face Mask Detection/Dataset/test/without_mask/30.jpg',target_size=(150,150,3))\ntest_image=image.load_img(r'C:/Users/saksham/Desktop/FaceMaskDetector/test/with_mask/1-with-mask.jpg',\n target_size=(150,150,3))\ntest_image\ntest_image=image.img_to_array(test_image)\ntest_image=np.expand_dims(test_image,axis=0)\nmymodel.predict(test_image)[0][0]\n\n\n# IMPLEMENTING LIVE DETECTION OF FACE MASK\n\nmymodel=load_model('mymodel.h5')\n\ncap=cv2.VideoCapture(0)\nface_cascade=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nwhile cap.isOpened():\n _,img=cap.read()\n face=face_cascade.detectMultiScale(img,scaleFactor=1.1,minNeighbors=4)\n for(x,y,w,h) in face:\n face_img = img[y:y+h, x:x+w]\n cv2.imwrite('temp.jpg',face_img)\n test_image=image.load_img('temp.jpg',target_size=(150,150,3))\n test_image=image.img_to_array(test_image)\n test_image=np.expand_dims(test_image,axis=0)\n pred=mymodel.predict(test_image)[0][0]\n if pred==1:\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),3)\n cv2.putText(img,'NO MASK',((x+w)//2,y+h+20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),3)\n else:\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),3)\n cv2.putText(img,'MASK',((x+w)//2,y+h+20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),3)\n datet=str(datetime.datetime.now())\n cv2.putText(img,datet,(400,450),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255),1)\n \n cv2.imshow('img',img)\n \n if cv2.waitKey(1)==ord('q'):\n break\n \ncap.release()\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.expand_dims"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
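One fragile spot in the live-detection loop of `facemask.py` above: `pred=mymodel.predict(test_image)[0][0]` is a sigmoid output, a float in [0, 1], so `if pred==1:` only fires when the network fully saturates. The usual pattern thresholds at 0.5; a small sketch (the helper and threshold are illustrative, with class 1 meaning "no mask" as in the script's own labelling):

```python
def mask_label(prob, threshold=0.5):
    # prob: sigmoid output of the classifier; in the script above,
    # class 1 corresponds to a face without a mask.
    return 'NO MASK' if prob >= threshold else 'MASK'

assert mask_label(0.97) == 'NO MASK'
assert mask_label(0.12) == 'MASK'
```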
AjayJohnAlex/ANN | [
"236bc4ca4aaa07038610bc6870578b1f0255da49"
] | [
"Improving the ANN.py"
] | [
"\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport seaborn as sns\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[2]:\n\n\ndataset = pd.read_csv('Churn_Modelling.csv')\ndataset.head()\n\n\n# In[3]:\n\n\nX = dataset.iloc[:,3:13].values\n\n\n# In[4]:\n\n\ny = dataset.iloc[:,13].values\n\n\n# In[5]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n# In[6]:\n\n\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\n\n\n# In[7]:\n\n\n# we have some object type data and hence need to convert\n# them into int\nlabelencoder1 = LabelEncoder()\n# for geography\nX[:,1] = labelencoder1.fit_transform(X[:,1])\n\n\n# In[8]:\n\n\n# we have some object type data and hence need to convert\n# them into int\nlabelencoder2 = LabelEncoder()\n# for gender\nX[:,2] = labelencoder2.fit_transform(X[:,2])\n\n\n# In[9]:\n\n\n# we need to create dummy values for geography and drop the\n# the unwanted column out of it \n\nonehotencoder = OneHotEncoder(categorical_features=[1])\n\n\n# In[10]:\n\n\nX = onehotencoder.fit_transform(X).toarray()\n\n\n# In[11]:\n\n\n# removing the first dummy class\nX = X [:,1:]\n\n\n# In[12]:\n\n\nX.shape\n\n\n# In[13]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(\nX, y, test_size=0.30, random_state=101)\n\n\n# In[14]:\n\n\n# feature scaling \nfrom sklearn.preprocessing import StandardScaler\n\n\n# In[15]:\n\n\nsc = StandardScaler()\n\n\n# In[16]:\n\n\nX_train = sc.fit_transform(X_train)\n\n\n# In[17]:\n\n\nX_test = sc.transform(X_test)\n\n\n# In[18]:\n\n\n# implementing k 4 cross validation to make better pred\n# keras classifier wrapper and it expects a function to \n# returned as its builds the architecture of ANN\nfrom keras.wrappers.scikit_learn import KerasClassifier\n# k4 models \nfrom sklearn.model_selection import cross_val_score\n# to initialise the ANN\nfrom keras.models import Sequential\n# dense model to build layers of ann\nfrom keras.layers import Dense\n\n\n# In[23]:\n\n\n# the classifer is local in fn\ndef build_classifier():\n \n # you can initialise a ANN in 2 ways \n # either def sequence of layers \n # or def by a graph\n\n # object of sequencial\n classifier = Sequential()\n # adding 2 layers : input layer and first hidden layer\n # units = no of hidden layers\n # kernal_initializer = initilaise weights using function\n # activation = activation function\n # input_dim = no of features in the input \n classifier.add(Dense(units=6,kernel_initializer='uniform',\n activation= 'relu',input_dim=11))\n \n # we will add one more hidden layer even though its not \n # neccesarry \n # we are adding it so that we can learn how to add one more\n # layer \n # and deep learning has many hiiden layers in ANN\n classifier.add(Dense(units=6,kernel_initializer='uniform',\n activation= 'relu'))\n \n # adding output layer \n classifier.add(Dense(units=1,kernel_initializer='uniform',\n activation= 'sigmoid'))\n \n # compile the ANN by applying stochastic GD\n # optimizer = the algo we need to use to find the optimal \n # weights ...there are many we wld use ADAM\n # loss = SGD is based on lost function we needs to be optimised\n # \n classifier.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n \n return classifier\n\n\n# In[24]:\n\n\n# new classifier\nclassifier = KerasClassifier(\n build_fn=build_classifier,batch_size =10,epochs = 100)\n\n\n# In[21]:\n\n\n# now we use cross value score from sklearn \n# k4 classification is used to get a 
relevant pred\n# it wld return 10 accuracy\n\n# accuracies = cross_val_score(estimator=classifier,X=X_train,y=y_train,cv=10,n_jobs = 1)\n\n\n# In[22]:\n\n\nmean = accuracies.mean()\nvariance = accuracies.std()\nprint('mean is ',mean)\nprint('variance is ',variance)\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"pandas.read_csv",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.StandardScaler",
"sklearn.preprocessing.LabelEncoder"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
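The `cross_val_score` call in the notebook above is what produces `accuracies` (with it commented out, as in the original, the next cell's `accuracies.mean()` raises a NameError; it is uncommented in the version above). The k-fold pattern itself is plain scikit-learn and worth seeing in isolation; a self-contained sketch with a stand-in estimator so it runs without Keras (data and estimator are illustrative only):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 11))   # same feature count as the encoded churn data
y = (X[:, 0] > 0).astype(int)

scores = cross_val_score(LogisticRegression(), X, y, cv=10)  # one accuracy per fold
print('mean is ', scores.mean())
print('std is ', scores.std())   # .std() is a standard deviation, not a variance
```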
TomLXXVI/pipe-network-sim | [
"49e42621180ec3125afa238d3ca56ae9f3a7662a"
] | [
"lib/nummath/deriv.py"
] | [
"import numpy as np\n\n\nclass Deriv:\n \"\"\"\n Calculate the derivative with given order of the function f(t) at point t.\n \"\"\"\n def __init__(self, f, dt, o=1):\n \"\"\"\n Initialize the differentiation solver.\n Params:\n - f the name of the function object ('def f(t):...')\n - dt the calculation step between successive points\n - o the order of the derivative to be calculated\n \"\"\"\n self.f = f\n self.dt = dt\n self.o = o\n\n # coefficients of forward finite difference approximations of order O(h^2)\n self.co = np.array([\n [-3.0, 4.0, -1.0, 0.0, 0.0, 0.0],\n [2.0, -5.0, 4.0, -1.0, 0.0, 0.0],\n [-5.0, 18.0, -24.0, 14.0, -3.0, 0.0],\n [3.0, -14.0, 26.0, -24.0, 11.0, -2.0]\n ])\n self.den = np.array([2 * dt, dt ** 2, 2 * dt ** 3, dt ** 4])\n\n def solve(self, t):\n \"\"\"\n Calculate the derivative at point 't'.\n The method uses Richardson extrapolation to improve accuracy.\n \"\"\"\n df = [0.0, 0.0]\n for i, dt_ in enumerate([self.dt, self.dt / 2]):\n t_array = np.arange(t, t + 6 * dt_, dt_)\n f_array = np.array([self.f(t_i) for t_i in t_array])\n c_array = self.co[self.o - 1, :]\n df[i] = (c_array * f_array) / self.den[self.o - 1]\n return (4.0 * df[1] - df[0]) / 3.0\n"
] | [
[
"numpy.arange",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
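With the `solve` fixes above (weighted sum of the six samples, denominator rebuilt from the current step `dt_`), `Deriv` can be sanity-checked against functions with known derivatives. A usage sketch, assuming the file is importable as `deriv.py`:

```python
import numpy as np
from deriv import Deriv  # assumes the module above is on the import path

d1 = Deriv(np.sin, dt=0.01, o=1)
print(d1.solve(0.3), 'vs exact', np.cos(0.3))    # d/dt sin(t) = cos(t)

d2 = Deriv(np.sin, dt=0.01, o=2)
print(d2.solve(0.3), 'vs exact', -np.sin(0.3))   # d^2/dt^2 sin(t) = -sin(t)
```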
whitews/FlowK | [
"d4e43a0488606ce5479b5110486dc3db128f6a87"
] | [
"flowkit/tests/flowjo_wsp_tests.py"
] | [
"\"\"\"\nTests for FlowJo 10 workspace files\n\"\"\"\nimport copy\nimport unittest\nimport os\nfrom io import BytesIO\nimport numpy as np\nfrom flowkit import Session, gates, transforms\nfrom .session_tests import test_samples_8c_full_set\n\n\nclass FlowJoWSPTestCase(unittest.TestCase):\n def test_load_wsp_single_poly(self):\n wsp_path = \"examples/data/simple_line_example/simple_poly_and_rect.wsp\"\n fcs_path = \"examples/data/simple_line_example/data_set_simple_line_100.fcs\"\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n self.assertIsInstance(\n fks.get_gate(\n 'my_group',\n 'poly1',\n sample_id='data_set_simple_line_100.fcs'\n ),\n gates.PolygonGate\n )\n\n gate_names = {'rect1', 'poly1'}\n wsp_gates_tuple = fks.get_gate_ids('my_group')\n wsp_gate_names = set([g[0] for g in wsp_gates_tuple])\n self.assertSetEqual(wsp_gate_names, gate_names)\n\n def test_load_wsp_single_ellipse(self):\n wsp_path = \"examples/data/simple_line_example/single_ellipse_51_events.wsp\"\n fcs_path = \"examples/data/simple_line_example/data_set_simple_line_100.fcs\"\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n self.assertIsInstance(\n fks.get_gate(\n 'All Samples',\n 'ellipse1',\n sample_id='data_set_simple_line_100.fcs'\n ),\n gates.EllipsoidGate\n )\n\n fks.analyze_samples(group_name='All Samples')\n results = fks.get_gating_results('All Samples', 'data_set_simple_line_100.fcs')\n gate_count = results.get_gate_count('ellipse1')\n self.assertEqual(gate_count, 48)\n\n def test_load_wsp_single_quad(self):\n wsp_path = \"examples/data/simple_diamond_example/simple_diamond_example_quad_gate.wsp\"\n fcs_path = \"examples/data/simple_diamond_example/test_data_diamond_01.fcs\"\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n # FlowJo quadrant gates are not true quadrant gates, rather a collection of rectangle gates\n self.assertIsInstance(\n fks.get_gate(\n 'All Samples',\n 'Q1: channel_A- , channel_B+',\n sample_id='test_data_diamond_01.fcs'\n ),\n gates.RectangleGate\n )\n\n fks.analyze_samples(group_name='All Samples')\n results = fks.get_gating_results('All Samples', 'test_data_diamond_01.fcs')\n\n gate_count_q1 = results.get_gate_count('Q1: channel_A- , channel_B+')\n gate_count_q2 = results.get_gate_count('Q2: channel_A+ , channel_B+')\n gate_count_q3 = results.get_gate_count('Q3: channel_A+ , channel_B-')\n gate_count_q4 = results.get_gate_count('Q4: channel_A- , channel_B-')\n self.assertEqual(gate_count_q1, 49671)\n self.assertEqual(gate_count_q2, 50596)\n self.assertEqual(gate_count_q3, 50330)\n self.assertEqual(gate_count_q4, 49403)\n\n def test_wsp_biex_transform(self):\n wsp_path = \"examples/data/simple_diamond_example/test_data_diamond_biex_rect.wsp\"\n fcs_path = \"examples/data/simple_diamond_example/test_data_diamond_01.fcs\"\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n self.assertIsInstance(\n fks.get_gate(\n 'All Samples',\n 'upper_right',\n sample_id='test_data_diamond_01.fcs'\n ),\n gates.RectangleGate\n )\n\n fks.analyze_samples(group_name='All Samples')\n results = fks.get_gating_results('All Samples', 'test_data_diamond_01.fcs')\n gate_count = results.get_gate_count('upper_right')\n self.assertEqual(gate_count, 50605)\n\n def test_wsp_fasinh_transform(self):\n wsp_path = \"examples/data/simple_diamond_example/test_data_diamond_asinh_rect.wsp\"\n fcs_path = \"examples/data/simple_diamond_example/test_data_diamond_01.fcs\"\n\n fks = 
Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n self.assertIsInstance(\n fks.get_gate(\n 'All Samples',\n 'upper_right',\n sample_id='test_data_diamond_01.fcs'\n ),\n gates.RectangleGate\n )\n\n fks.analyze_samples(group_name='All Samples')\n results = fks.get_gating_results('All Samples', 'test_data_diamond_01.fcs')\n gate_count = results.get_gate_count('upper_right')\n self.assertEqual(gate_count, 50559)\n\n def test_wsp_fasinh_transform_v2(self):\n wsp_path = \"examples/data/simple_diamond_example/test_data_diamond_asinh_rect2.wsp\"\n fcs_path = \"examples/data/simple_diamond_example/test_data_diamond_01.fcs\"\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n self.assertIsInstance(\n fks.get_gate(\n 'All Samples',\n 'upper_right',\n sample_id='test_data_diamond_01.fcs'\n ),\n gates.RectangleGate\n )\n\n fks.analyze_samples(group_name='All Samples')\n results = fks.get_gating_results('All Samples', 'test_data_diamond_01.fcs')\n gate_count = results.get_gate_count('upper_right')\n self.assertEqual(gate_count, 50699)\n\n def test_wsp_biex_transform_width_interpolation(self):\n neg = 1.0\n width = -7.943282\n\n # this LUT exists for only the single negative value of 1.0\n lut_file_name = \"tr_biex_l256_w%.6f_n%.6f_m4.418540_r262144.000029.csv\" % (width, neg)\n lut_file_path = os.path.join('examples', 'data', 'flowjo_xforms', lut_file_name)\n y, x = np.loadtxt(lut_file_path, delimiter=',', usecols=(0, 1), skiprows=1, unpack=True)\n\n biex_xform = transforms.WSPBiexTransform('biex', negative=neg, width=width)\n\n test_y = biex_xform.apply(x)\n\n mean_pct_diff = 100. * np.mean(np.abs(test_y[1:] - y[1:]) / y[1:])\n self.assertLess(mean_pct_diff, 0.01)\n\n def test_get_sample_groups(self):\n wsp_path = \"examples/data/simple_line_example/simple_poly_and_rect.wsp\"\n fcs_path = \"examples/data/simple_line_example/data_set_simple_line_100.fcs\"\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n groups = fks.get_sample_groups()\n groups_truth = ['default', 'All Samples', 'my_group']\n\n self.assertListEqual(groups, groups_truth)\n\n fks.add_sample_group('group2')\n groups_truth.append('group2')\n groups = fks.get_sample_groups()\n\n self.assertListEqual(groups, groups_truth)\n\n def test_parse_wsp_with_ellipse(self):\n wsp_path = \"examples/data/8_color_data_set/8_color_ICS_with_ellipse.wsp\"\n fcs_path = \"examples/data/8_color_data_set/fcs_files/101_DEN084Y5_15_E01_008_clean.fcs\"\n sample_id = '101_DEN084Y5_15_E01_008_clean.fcs'\n sample_grp = 'DEN'\n gate_name = 'ellipse1'\n gate_path = ('root', 'Time', 'Singlets', 'aAmine-', 'CD3+')\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path, ignore_missing_files=True)\n\n fks.analyze_samples(sample_grp, sample_id=sample_id)\n gate_indices = fks.get_gate_membership(sample_grp, sample_id, gate_name, gate_path=gate_path)\n\n self.assertIsInstance(gate_indices, np.ndarray)\n self.assertEqual(np.sum(gate_indices), 7018)\n\n def test_get_ambiguous_gate_objects(self):\n wsp_path = \"examples/data/8_color_data_set/8_color_ICS.wsp\"\n fcs_path = \"examples/data/8_color_data_set/fcs_files/101_DEN084Y5_15_E01_008_clean.fcs\"\n sample_id = '101_DEN084Y5_15_E01_008_clean.fcs'\n sample_grp = 'DEN'\n gate_name = 'TNFa+'\n gate_path = ('root', 'Time', 'Singlets', 'aAmine-', 'CD3+', 'CD4+')\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path, ignore_missing_files=True)\n\n fks.analyze_samples(sample_grp)\n gate_indices = 
fks.get_gate_membership(sample_grp, sample_id, gate_name, gate_path=gate_path)\n\n self.assertIsInstance(gate_indices, np.ndarray)\n self.assertEqual(np.sum(gate_indices), 21)\n\n def test_parse_wsp_reused_gate_with_child(self):\n wsp_path = \"examples/data/8_color_data_set/reused_quad_gate_with_child.wsp\"\n\n fks = Session(copy.deepcopy(test_samples_8c_full_set))\n fks.import_flowjo_workspace(wsp_path, ignore_missing_files=True)\n group_name = 'All Samples'\n gate_name = 'some_child_gate'\n\n gate_ids = fks.get_gate_ids(group_name)\n\n gate_id_1 = (gate_name, ('root', 'good cells', 'cd4+', 'Q2: CD107a+, IL2+'))\n gate_id_2 = (gate_name, ('root', 'good cells', 'cd8+', 'Q2: CD107a+, IL2+'))\n\n self.assertIn(gate_id_1, gate_ids)\n self.assertIn(gate_id_2, gate_ids)\n\n def test_analyze_single_sample(self):\n wsp_path = \"examples/data/8_color_data_set/8_color_ICS_simple.wsp\"\n sample_id = '101_DEN084Y5_15_E01_008_clean.fcs'\n sample_grp = 'DEN'\n\n fks = Session(copy.deepcopy(test_samples_8c_full_set))\n fks.import_flowjo_workspace(wsp_path, ignore_missing_files=True)\n\n sample_ids = fks.get_group_sample_ids(sample_grp)\n self.assertEqual(len(sample_ids), 3)\n\n fks.analyze_samples(sample_grp, sample_id=sample_id)\n report = fks.get_group_report(sample_grp)\n\n self.assertEqual(report['sample'].nunique(), 1)\n\n def test_export_wsp(self):\n wsp_path = \"examples/data/8_color_data_set/8_color_ICS.wsp\"\n sample_grp = 'DEN'\n\n # use a leaf gate to test if the new WSP session is created correctly\n gate_name = 'TNFa+'\n gate_path = ('root', 'Time', 'Singlets', 'aAmine-', 'CD3+', 'CD4+')\n\n fks = Session(copy.deepcopy(test_samples_8c_full_set))\n fks.import_flowjo_workspace(wsp_path, ignore_missing_files=True)\n\n out_file = BytesIO()\n fks.export_wsp(out_file, sample_grp)\n out_file.seek(0)\n\n fks_out = Session(copy.deepcopy(test_samples_8c_full_set))\n fks_out.import_flowjo_workspace(out_file, ignore_missing_files=True)\n\n self.assertIsInstance(fks_out, Session)\n\n fks_gate = fks.get_gate(sample_grp, gate_name, gate_path)\n fks_out_gate = fks_out.get_gate(sample_grp, gate_name, gate_path)\n\n self.assertIsInstance(fks_gate, gates.RectangleGate)\n self.assertIsInstance(fks_out_gate, gates.RectangleGate)\n\n self.assertEqual(fks_gate.gate_name, gate_name)\n self.assertEqual(fks_out_gate.gate_name, gate_name)\n"
] | [
[
"numpy.abs",
"numpy.sum",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hushukai/tf-tensor2tensor | [
"6e685f57ed170bb7f887271d7bbd58cf57eb6af7"
] | [
"tensor2tensor/utils/multistep_optimizer.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Multi-step optimizers simulating large batches.\n\nOptimizer variants which make it possible to use very large batch sizes with\nlimited GPU memory. Optimizers in this module accumulate the gradients for n\nbatches, and call the optimizer's update rule every n batches with the\naccumulated gradients.\n\nSee [Saunders et al., 2018](https://arxiv.org/abs/1805.00456) for details.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\n\nclass MultistepAdamOptimizer(tf.compat.v1.train.AdamOptimizer):\n \"\"\"Adam with SGD updates every n steps with accumulated gradients.\"\"\"\n\n def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,\n use_locking=False, name=\"Adam\", n=1):\n super(MultistepAdamOptimizer, self).__init__(\n learning_rate=learning_rate, beta1=beta1, beta2=beta2, epsilon=epsilon,\n use_locking=use_locking, name=name)\n self._n = n # Call Adam optimizer every n batches with accumulated grads\n self._n_t = None # n as tensor\n\n def _create_slots(self, var_list):\n \"\"\"Create slot variables for Adam with accumulated gradients.\"\"\"\n super(MultistepAdamOptimizer, self)._create_slots(var_list)\n first_var = min(var_list, key=lambda x: x.name)\n self._create_non_slot_variable(initial_value=0 if self._n == 1 else 1,\n name=\"iter\",\n colocate_with=first_var)\n for v in var_list:\n self._zeros_slot(v, \"grad_acc\", self._name)\n\n def _get_iter_variable(self):\n graph = (\n None if tf.executing_eagerly() else tf.get_default_graph())\n return self._get_non_slot_variable(\"iter\", graph=graph)\n\n def _prepare(self):\n super(MultistepAdamOptimizer, self)._prepare()\n self._n_t = tf.convert_to_tensor(self._n, name=\"n\")\n\n def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):\n \"\"\"Apply conditionally if counter is zero.\"\"\"\n grad_acc = self.get_slot(var, \"grad_acc\")\n\n def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):\n total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)\n adam_op = apply_fn(total_grad, var, *args, **kwargs)\n with tf.control_dependencies([adam_op]):\n grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc),\n use_locking=self._use_locking)\n return tf.group(adam_op, grad_acc_to_zero_op)\n\n def accumulate_gradient(grad_acc, grad):\n assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)\n return tf.group(assign_op) # Strip return value\n\n return tf.cond(\n tf.equal(self._get_iter_variable(), 0),\n lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),\n lambda: accumulate_gradient(grad_acc, grad))\n\n def _apply_dense(self, grad, var):\n return self._apply_cond(\n super(MultistepAdamOptimizer, self)._apply_dense, grad, var)\n\n def _resource_apply_dense(self, grad, var):\n return self._apply_cond(\n super(MultistepAdamOptimizer, 
self)._resource_apply_dense, grad, var)\n\n def _apply_sparse_shared(self, grad, var, indices, scatter_add):\n return self._apply_cond(\n super(MultistepAdamOptimizer, self)._apply_sparse_shared, grad, var,\n indices, scatter_add)\n\n def _apply_sparse(self, grad, var):\n # TODO(fstahlberg): Implement a sparse version\n tf.logging.warning(\"MultistepAdamOptimizer does not support sparse updates\")\n dense_grad = tf.convert_to_tensor(grad)\n return self._apply_cond(\n super(MultistepAdamOptimizer, self)._apply_dense, dense_grad, var)\n\n def _resource_apply_sparse_duplicate_indices(self, grad, var, indices):\n tf.logging.warning(\"MultistepAdamOptimizer does not support sparse updates\")\n # Note that conversion to a dense Tensor handles duplicate `indices`\n # correctly (summing them). A real sparse implementation will probably want\n # to override _resource_apply_sparse instead so it gets them de-duplicated\n # automatically.\n dense_grad = tf.convert_to_tensor(\n tf.IndexedSlices(values=grad, indices=indices,\n dense_shape=tf.shape(var)))\n return self._apply_cond(\n super(MultistepAdamOptimizer, self)._resource_apply_dense,\n dense_grad, var)\n\n def _finish(self, update_ops, name_scope):\n \"\"\"Updates beta_power variables every n batches and incrs counter.\"\"\"\n iter_ = self._get_iter_variable()\n beta1_power, beta2_power = self._get_beta_accumulators()\n with tf.control_dependencies(update_ops):\n with tf.colocate_with(iter_):\n\n def update_beta_op():\n update_beta1 = beta1_power.assign(\n beta1_power * self._beta1_t,\n use_locking=self._use_locking)\n update_beta2 = beta2_power.assign(\n beta2_power * self._beta2_t,\n use_locking=self._use_locking)\n return tf.group(update_beta1, update_beta2)\n maybe_update_beta = tf.cond(\n tf.equal(iter_, 0), update_beta_op, tf.no_op)\n with tf.control_dependencies([maybe_update_beta]):\n update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),\n use_locking=self._use_locking)\n return tf.group(\n *update_ops + [update_iter, maybe_update_beta], name=name_scope)\n"
] | [
[
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.convert_to_tensor",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.executing_eagerly",
"tensorflow.compat.v1.logging.warning",
"tensorflow.compat.v1.group",
"tensorflow.compat.v1.assign_add",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.mod",
"tensorflow.compat.v1.zeros_like",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.colocate_with"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
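Stripped of the TensorFlow graph conditionals, the control flow of `MultistepAdamOptimizer` above is: accumulate gradients across n calls, and on every n-th call hand the accumulated gradient divided by n to the underlying update rule, then zero the accumulator. A framework-free sketch of that accumulation idea (class and names are mine, not the tensor2tensor API):

```python
import numpy as np

class GradAccumulator:
    """Accumulate n gradients, then apply their mean once (simulated large batch)."""
    def __init__(self, n):
        self.n, self.count, self.acc = n, 0, None

    def step(self, grad, apply_fn):
        self.acc = grad.copy() if self.acc is None else self.acc + grad
        self.count += 1
        if self.count % self.n == 0:
            apply_fn(self.acc / self.n)  # the optimizer sees one averaged gradient
            self.acc = None             # reset, mirroring grad_acc_to_zero_op

acc = GradAccumulator(n=4)
for step in range(8):
    acc.step(np.full(3, float(step)), lambda g: print('apply', g))  # fires at steps 4 and 8
```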
liulisixin/unsupervised-learning-intrinsic-images | [
"0d4ad151d203885c87122bcc305c787210b28a5c"
] | [
"data/params.py"
] | [
"import copy\nimport json\nimport random\nimport hashlib\nimport numpy as np\n\n\nclass IntrinsicParameters():\n \"\"\" Global parameter values for the algorithm \"\"\"\n\n def __init__(self):\n\n #: if True, print progress to the console\n self.logging = False\n\n #: if True, use a fixed seed for k-means clustering\n self.fixed_seed = False\n\n #: number of iterations for the global loop\n self.n_iters = 25\n\n #: number of iterations for the dense CRF\n self.n_crf_iters = 10\n\n #: if ``True``, split clusters at the end\n self.split_clusters = True\n\n #: Pixels k units apart vertically or horizontally are smoothed.\n #: The paper only uses k=1.\n self.shading_smooth_k = 1\n\n #: method used to initialize the shading smoothness term:\n #: \"none\": omit this term for the first iteration\n #: \"image\": use the image itself (intensity channel)\n #: \"constant\": constant 0.5\n self.shading_blur_init_method = 'none'\n\n #: standard deviation for blurring the shading channel\n self.shading_blur_sigma = 0.1\n\n #: exponent by which the blur size decreases each iteration\n self.shading_blur_iteration_pow = 1\n\n #: if ``True``, blur in log space. if ``False``, blur in linear\n #: space and then convert to log.\n self.shading_blur_log = True\n\n #: kmeans initialization: weight given to the intensity channel\n self.kmeans_intensity_scale = 0.5\n\n #: kmeans initialization: number of clusters (labels) to use\n self.kmeans_n_clusters = 20\n\n #: kmeans initialization: max pixels to consider at once\n #: (if the image has more than this, the image is randomly subsampled)\n self.kmeans_max_samples = 2000000\n\n #: weight of the absolute reflectance prior\n self.abs_reflectance_weight = 0\n\n #: weight of the absolute shading prior\n self.abs_shading_weight = 500.0\n\n #: gray-point of absolute shading term\n self.abs_shading_gray_point = 0.5\n\n #: if ``True``, compute shading error in log space\n self.abs_shading_log = True\n\n #: weight of the shading smoothness unary term\n self.shading_target_weight = 20000.0\n\n #: norm used to penalize shading smoothness deviations\n self.shading_target_norm = \"L2\"\n\n #: interpret labels as RGB (intensity with chromaticity), thereby\n #: penalizing deviations from grayscale in the shading channel (though\n #: the final answer is always grayscale anyway)\n self.shading_target_chromaticity = False\n\n #: weight of the chromaticity term: each reflectance intensity is\n #: assigned a chromaticity (from the kmeans initialization) and is\n #: encouraged to be assigned to image pixels that share the same\n #: chromaticity.\n self.chromaticity_weight = 0\n\n #: which norm is used for chromaticity\n self.chromaticity_norm = \"L1\"\n\n #: compute reflectance distance in log space for the pairwise terms\n self.pairwise_intensity_log = True\n\n #: include chromaticity in pairwise term\n self.pairwise_intensity_chromaticity = True\n\n #: weight of the pairwise term\n self.pairwise_weight = 10000.0\n\n #: bilateral standard deviation: pairwise pixel distance\n self.theta_p = 0.1\n\n #: bilateral standard deviation: intensity\n self.theta_l = 0.12\n\n #: bilateral standard deviation: chromaticity\n self.theta_c = 0.025\n\n # bilateral standard deviation: Luminance \n self.theta_L = 0.025 \n\n #: if True, keep the median of all intensities fixed in stage 2. 
This\n #: doesn't really change much, since the solver is damped anyway.\n self.stage2_maintain_median_intensity = True\n\n #: which norm to use when minimizing shading differences in stage 2\n self.stage2_norm = \"L1\"\n\n #: if True, interpret labels as RGB instead of intensity\n self.stage2_chromaticity = False\n\n #: parameters to be saved/loaded\n ALL_PARAMS = [\n 'n_iters',\n 'n_crf_iters',\n 'split_clusters',\n 'kmeans_n_clusters',\n 'kmeans_max_samples',\n 'shading_blur_init_method',\n 'shading_blur_method',\n 'shading_blur_log',\n 'shading_blur_sigma',\n 'shading_blur_bilateral_sigma_range',\n 'shading_blur_iteration_pow',\n 'shading_smooth_k',\n 'kmeans_intensity_scale',\n 'abs_reflectance_weight',\n 'abs_shading_log',\n 'abs_shading_weight',\n 'abs_shading_gray_point',\n 'shading_target_weight',\n 'shading_target_norm',\n 'shading_target_chromaticity',\n 'chromaticity_weight',\n 'chromaticity_norm',\n 'pairwise_intensity_log',\n 'pairwise_intensity_chromaticity',\n 'pairwise_weight',\n 'theta_p',\n 'theta_l',\n 'theta_c',\n 'stage2_norm',\n 'stage2_chromaticity',\n 'stage2_maintain_median_intensity',\n ]\n\n #: parameters to be adjusted during training\n TRAIN_PARAMS = [\n 'n_iters',\n #'n_crf_iters',\n\n 'split_clusters',\n\n 'kmeans_intensity_scale',\n 'kmeans_n_clusters',\n\n 'shading_blur_init_method',\n #'shading_blur_log',\n #'pairwise_intensity_log',\n\n 'shading_blur_sigma',\n 'shading_smooth_k',\n\n 'abs_reflectance_weight',\n #'abs_shading_log',\n 'abs_shading_weight',\n 'abs_shading_gray_point',\n 'shading_target_weight',\n 'chromaticity_weight',\n 'pairwise_weight',\n\n 'theta_p',\n 'theta_l',\n 'theta_c',\n ]\n\n #: these parameters are discrete 1-of-N choices\n PARAM_CHOICES = {\n 'shading_blur_init_method': (\n \"none\",\n \"image\",\n \"constant\",\n ),\n }\n\n #: bounds on paramters\n PARAM_BOUNDS = {\n 'n_iters': (1, 30),\n 'n_crf_iters': (1, 10),\n 'shading_blur_sigma': (1e-8, 1.0),\n 'shading_smooth_k': (1, 4),\n 'kmeans_intensity_scale': (1e-8, 1e10),\n 'kmeans_n_clusters': (2, 50),\n 'abs_reflectance_weight': (0, 1e10),\n 'abs_shading_weight': (0, 1e10),\n 'abs_shading_gray_point': (0, 1e10),\n 'shading_target_weight': (0, 1e10),\n 'chromaticity_weight': (0, 1e10),\n 'pairwise_weight': (0, 1e16),\n 'theta_p': (1e-8, 1e10),\n 'theta_l': (1e-8, 1e10),\n 'theta_c': (1e-8, 1e10),\n }\n\n WEIGHT_PARAMS = [\n 'abs_reflectance_weight',\n 'abs_shading_weight',\n 'shading_target_weight',\n 'chromaticity_weight',\n 'pairwise_weight',\n ]\n\n THETA_PARAMS = [\n 'theta_p',\n 'theta_l',\n 'theta_c',\n ]\n\n def to_json(self, indent=4, **extra_kwargs):\n \"\"\" Convert paramters to a JSON-encoded string \"\"\"\n obj = {k: getattr(self, k)\n for k in IntrinsicParameters.ALL_PARAMS}\n if extra_kwargs:\n obj.update(extra_kwargs)\n return json.dumps(obj, sort_keys=True, indent=indent)\n\n def __str__(self):\n return self.to_json()\n\n def __unicode__(self):\n return self.to_json()\n\n @staticmethod\n def from_file(filename):\n \"\"\" Load paramers from ``filename`` (in JSON format) \"\"\"\n return IntrinsicParameters.from_dict(json.load(open(filename)))\n\n @staticmethod\n def from_dict(d):\n \"\"\" Load paramers from a dictionary \"\"\"\n ret = IntrinsicParameters()\n for k, v in d.iteritems():\n if not k.startswith('_') and k not in IntrinsicParameters.ALL_PARAMS:\n raise ValueError(\"Invalid parameter: %s\" % k)\n setattr(ret, k, d[k])\n return ret\n\n def md5(self):\n dump = self.to_json()\n m = hashlib.md5()\n m.update(dump)\n return m.hexdigest()\n\n def save(self, 
filename, **extra_kwargs):\n \"\"\" Save paramers to ``filename`` (in JSON format) \"\"\"\n with open(filename, 'w') as f:\n f.write(self.to_json(**extra_kwargs))\n\n def clip(self):\n \"\"\" Clip parameters to be within bounds \"\"\"\n for k, bounds in IntrinsicParameters.PARAM_BOUNDS.iteritems():\n v = getattr(self, k)\n t = type(v)\n setattr(self, k, t(np.clip(v, bounds[0], bounds[1])))\n\n def random_perterbation(\n self, mean_num_params=8, std_delta=0.5, seed=None):\n \"\"\" Return a new set of parameters with a random perterbation. The\n number of variables modified is Poisson-distributed with mean\n ``mean_num_params`` , and each changed variable is multiplied by exp(x)\n where x is normally distributed with mean 0 and standard deviation\n ``std_delta`` \"\"\"\n\n if seed is not None:\n random.seed(seed)\n np.random.seed(seed)\n\n # choose a random subset to modify\n num_params = len(IntrinsicParameters.TRAIN_PARAMS)\n n = np.clip(np.random.poisson(mean_num_params), 1, num_params)\n keys = random.sample(IntrinsicParameters.TRAIN_PARAMS, n)\n\n # modify the subset\n ret = copy.deepcopy(self)\n for k in keys:\n v = getattr(ret, k)\n t = type(v)\n\n if k in IntrinsicParameters.PARAM_CHOICES:\n v = random.choice(IntrinsicParameters.PARAM_CHOICES[k])\n elif t == bool:\n v = random.choice((False, True))\n else:\n v *= np.exp(random.normalvariate(0, std_delta))\n\n if t in (int, long):\n v = round(v)\n setattr(ret, k, t(v))\n\n ret.clip()\n return ret\n"
] | [
[
"numpy.random.poisson",
"numpy.random.seed",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
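`params.py` above is Python 2 code in a few places: `from_dict` and `clip` call `dict.iteritems()`, `random_perterbation` tests against the removed `long` type, and `md5()` passes a `str` to `hashlib.md5().update()`, which Python 3 rejects. A Python 3 sketch of the same parameter-hashing idea (standalone, not a patch to the class):

```python
import hashlib
import json

params = {'n_iters': 25, 'n_crf_iters': 10, 'theta_p': 0.1}  # illustrative subset
dump = json.dumps(params, sort_keys=True, indent=4)          # same canonical form as to_json()
digest = hashlib.md5(dump.encode('utf-8')).hexdigest()       # hash bytes, as Python 3 requires
print(digest)
```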
wenming2014/tensorflow | [
"a102a6a71844e194f3946f6318768c5367f1f16b",
"a102a6a71844e194f3946f6318768c5367f1f16b",
"a102a6a71844e194f3946f6318768c5367f1f16b",
"a102a6a71844e194f3946f6318768c5367f1f16b",
"a102a6a71844e194f3946f6318768c5367f1f16b",
"a102a6a71844e194f3946f6318768c5367f1f16b",
"a102a6a71844e194f3946f6318768c5367f1f16b",
"a102a6a71844e194f3946f6318768c5367f1f16b",
"a102a6a71844e194f3946f6318768c5367f1f16b",
"a102a6a71844e194f3946f6318768c5367f1f16b",
"a102a6a71844e194f3946f6318768c5367f1f16b",
"a102a6a71844e194f3946f6318768c5367f1f16b",
"a102a6a71844e194f3946f6318768c5367f1f16b",
"07da23bfa2a9ca10cd7c1dd6bea0f85d981c013e",
"a102a6a71844e194f3946f6318768c5367f1f16b"
] | [
"tensorflow/python/training/checkpoint_management.py",
"tensorflow/python/framework/dtypes.py",
"tensorflow/python/kernel_tests/diag_op_test.py",
"tensorflow/python/data/kernel_tests/dataset_constructor_op_test.py",
"tensorflow/python/data/experimental/kernel_tests/sleep_test.py",
"tensorflow/python/distribute/cross_device_utils.py",
"tensorflow/python/ops/ragged/ragged_map_inner_values_op_test.py",
"tensorflow/python/ops/ragged/ragged_batch_gather_op_test.py",
"tensorflow/python/kernel_tests/signal/mfcc_ops_test.py",
"tensorflow/python/kernel_tests/candidate_sampler_ops_test.py",
"tensorflow/python/ops/ragged/ragged_where_op_test.py",
"tensorflow/python/ops/metrics_impl.py",
"tensorflow/python/ops/ragged/ragged_util.py",
"tensorflow/python/autograph/converters/control_flow_test.py",
"tensorflow/python/training/checkpointable/data_structures_test.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# pylint: disable=invalid-name\n\"\"\"Save and restore variables.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os.path\nimport re\nimport time\n\nfrom google.protobuf import text_format\n\nfrom tensorflow.core.protobuf import saver_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.training.checkpoint_state_pb2 import CheckpointState\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef _GetCheckpointFilename(save_dir, latest_filename):\n \"\"\"Returns a filename for storing the CheckpointState.\n\n Args:\n save_dir: The directory for saving and restoring checkpoints.\n latest_filename: Name of the file in 'save_dir' that is used\n to store the CheckpointState.\n\n Returns:\n The path of the file that contains the CheckpointState proto.\n \"\"\"\n if latest_filename is None:\n latest_filename = \"checkpoint\"\n return os.path.join(save_dir, latest_filename)\n\n\[email protected](\n date=None,\n instructions=(\"Use tf.train.CheckpointManager to manage checkpoints rather \"\n \"than editing the Checkpoint proto manually.\"))\n@tf_export(v1=[\"train.generate_checkpoint_state_proto\"])\ndef generate_checkpoint_state_proto(save_dir,\n model_checkpoint_path,\n all_model_checkpoint_paths=None,\n all_model_checkpoint_timestamps=None,\n last_preserved_timestamp=None):\n \"\"\"Generates a checkpoint state proto.\n\n Args:\n save_dir: Directory where the model was saved.\n model_checkpoint_path: The checkpoint file.\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\n the last element must be equal to model_checkpoint_path. These paths\n are also saved in the CheckpointState proto.\n all_model_checkpoint_timestamps: A list of floats, indicating the number of\n seconds since the Epoch when each checkpoint was generated.\n last_preserved_timestamp: A float, indicating the number of seconds since\n the Epoch when the last preserved checkpoint was written, e.g. 
due to a\n `keep_checkpoint_every_n_hours` parameter (see\n `tf.contrib.checkpoint.CheckpointManager` for an implementation).\n Returns:\n CheckpointState proto with model_checkpoint_path and\n all_model_checkpoint_paths updated to either absolute paths or\n relative paths to the current save_dir.\n\n Raises:\n ValueError: If `all_model_checkpoint_timestamps` was provided but its length\n does not match `all_model_checkpoint_paths`.\n \"\"\"\n if all_model_checkpoint_paths is None:\n all_model_checkpoint_paths = []\n\n if (not all_model_checkpoint_paths or\n all_model_checkpoint_paths[-1] != model_checkpoint_path):\n logging.info(\"%s is not in all_model_checkpoint_paths. Manually adding it.\",\n model_checkpoint_path)\n all_model_checkpoint_paths.append(model_checkpoint_path)\n\n if (all_model_checkpoint_timestamps\n and (len(all_model_checkpoint_timestamps)\n != len(all_model_checkpoint_paths))):\n raise ValueError(\n (\"Checkpoint timestamps, if provided, must match checkpoint paths (got \"\n \"paths %s and timestamps %s)\")\n % (all_model_checkpoint_paths, all_model_checkpoint_timestamps))\n\n # Relative paths need to be rewritten to be relative to the \"save_dir\"\n # if model_checkpoint_path already contains \"save_dir\".\n if not os.path.isabs(save_dir):\n if not os.path.isabs(model_checkpoint_path):\n model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)\n for i in range(len(all_model_checkpoint_paths)):\n p = all_model_checkpoint_paths[i]\n if not os.path.isabs(p):\n all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)\n\n coord_checkpoint_proto = CheckpointState(\n model_checkpoint_path=model_checkpoint_path,\n all_model_checkpoint_paths=all_model_checkpoint_paths,\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\n last_preserved_timestamp=last_preserved_timestamp)\n\n return coord_checkpoint_proto\n\n\[email protected](\n date=None,\n instructions=(\"Use tf.train.CheckpointManager to manage checkpoints rather \"\n \"than manually editing the Checkpoint proto.\"))\n@tf_export(v1=[\"train.update_checkpoint_state\"])\ndef update_checkpoint_state(save_dir,\n model_checkpoint_path,\n all_model_checkpoint_paths=None,\n latest_filename=None,\n all_model_checkpoint_timestamps=None,\n last_preserved_timestamp=None):\n \"\"\"Updates the content of the 'checkpoint' file.\n\n This updates the checkpoint file containing a CheckpointState\n proto.\n\n Args:\n save_dir: Directory where the model was saved.\n model_checkpoint_path: The checkpoint file.\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\n the last element must be equal to model_checkpoint_path. These paths\n are also saved in the CheckpointState proto.\n latest_filename: Optional name of the checkpoint file. Default to\n 'checkpoint'.\n all_model_checkpoint_timestamps: Optional list of timestamps (floats,\n seconds since the Epoch) indicating when the checkpoints in\n `all_model_checkpoint_paths` were created.\n last_preserved_timestamp: A float, indicating the number of seconds since\n the Epoch when the last preserved checkpoint was written, e.g. 
due to a\n `keep_checkpoint_every_n_hours` parameter (see\n `tf.contrib.checkpoint.CheckpointManager` for an implementation).\n Raises:\n RuntimeError: If any of the model checkpoint paths conflict with the file\n containing CheckpointSate.\n \"\"\"\n update_checkpoint_state_internal(\n save_dir=save_dir,\n model_checkpoint_path=model_checkpoint_path,\n all_model_checkpoint_paths=all_model_checkpoint_paths,\n latest_filename=latest_filename,\n save_relative_paths=False,\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\n last_preserved_timestamp=last_preserved_timestamp)\n\n\ndef update_checkpoint_state_internal(save_dir,\n model_checkpoint_path,\n all_model_checkpoint_paths=None,\n latest_filename=None,\n save_relative_paths=False,\n all_model_checkpoint_timestamps=None,\n last_preserved_timestamp=None):\n \"\"\"Updates the content of the 'checkpoint' file.\n\n This updates the checkpoint file containing a CheckpointState\n proto.\n\n Args:\n save_dir: Directory where the model was saved.\n model_checkpoint_path: The checkpoint file.\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\n the last element must be equal to model_checkpoint_path. These paths\n are also saved in the CheckpointState proto.\n latest_filename: Optional name of the checkpoint file. Default to\n 'checkpoint'.\n save_relative_paths: If `True`, will write relative paths to the checkpoint\n state file.\n all_model_checkpoint_timestamps: Optional list of timestamps (floats,\n seconds since the Epoch) indicating when the checkpoints in\n `all_model_checkpoint_paths` were created.\n last_preserved_timestamp: A float, indicating the number of seconds since\n the Epoch when the last preserved checkpoint was written, e.g. due to a\n `keep_checkpoint_every_n_hours` parameter (see\n `tf.contrib.checkpoint.CheckpointManager` for an implementation).\n\n Raises:\n RuntimeError: If any of the model checkpoint paths conflict with the file\n containing CheckpointSate.\n \"\"\"\n # Writes the \"checkpoint\" file for the coordinator for later restoration.\n coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)\n if save_relative_paths:\n if os.path.isabs(model_checkpoint_path):\n rel_model_checkpoint_path = os.path.relpath(\n model_checkpoint_path, save_dir)\n else:\n rel_model_checkpoint_path = model_checkpoint_path\n rel_all_model_checkpoint_paths = []\n for p in all_model_checkpoint_paths:\n if os.path.isabs(p):\n rel_all_model_checkpoint_paths.append(os.path.relpath(p, save_dir))\n else:\n rel_all_model_checkpoint_paths.append(p)\n ckpt = generate_checkpoint_state_proto(\n save_dir,\n rel_model_checkpoint_path,\n all_model_checkpoint_paths=rel_all_model_checkpoint_paths,\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\n last_preserved_timestamp=last_preserved_timestamp)\n else:\n ckpt = generate_checkpoint_state_proto(\n save_dir,\n model_checkpoint_path,\n all_model_checkpoint_paths=all_model_checkpoint_paths,\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\n last_preserved_timestamp=last_preserved_timestamp)\n\n if coord_checkpoint_filename == ckpt.model_checkpoint_path:\n raise RuntimeError(\"Save path '%s' conflicts with path used for \"\n \"checkpoint state. 
Please use a different save path.\" %\n model_checkpoint_path)\n\n # Preventing potential read/write race condition by *atomically* writing to a\n # file.\n file_io.atomic_write_string_to_file(coord_checkpoint_filename,\n text_format.MessageToString(ckpt))\n\n\n@tf_export(\"train.get_checkpoint_state\")\ndef get_checkpoint_state(checkpoint_dir, latest_filename=None):\n \"\"\"Returns CheckpointState proto from the \"checkpoint\" file.\n\n If the \"checkpoint\" file contains a valid CheckpointState\n proto, returns it.\n\n Args:\n checkpoint_dir: The directory of checkpoints.\n latest_filename: Optional name of the checkpoint file. Default to\n 'checkpoint'.\n\n Returns:\n A CheckpointState if the state was available, None\n otherwise.\n\n Raises:\n ValueError: if the checkpoint read doesn't have model_checkpoint_path set.\n \"\"\"\n ckpt = None\n coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir,\n latest_filename)\n f = None\n try:\n # Check that the file exists before opening it to avoid\n # many lines of errors from colossus in the logs.\n if file_io.file_exists(coord_checkpoint_filename):\n file_content = file_io.read_file_to_string(\n coord_checkpoint_filename)\n ckpt = CheckpointState()\n text_format.Merge(file_content, ckpt)\n if not ckpt.model_checkpoint_path:\n raise ValueError(\"Invalid checkpoint state loaded from \"\n + checkpoint_dir)\n # For relative model_checkpoint_path and all_model_checkpoint_paths,\n # prepend checkpoint_dir.\n if not os.path.isabs(ckpt.model_checkpoint_path):\n ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,\n ckpt.model_checkpoint_path)\n for i in range(len(ckpt.all_model_checkpoint_paths)):\n p = ckpt.all_model_checkpoint_paths[i]\n if not os.path.isabs(p):\n ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)\n except errors.OpError as e:\n # It's ok if the file cannot be read\n logging.warning(\"%s: %s\", type(e).__name__, e)\n logging.warning(\"%s: Checkpoint ignored\", coord_checkpoint_filename)\n return None\n except text_format.ParseError as e:\n logging.warning(\"%s: %s\", type(e).__name__, e)\n logging.warning(\"%s: Checkpoint ignored\", coord_checkpoint_filename)\n return None\n finally:\n if f:\n f.close()\n return ckpt\n\n\ndef _prefix_to_checkpoint_path(prefix, format_version):\n \"\"\"Returns the pathname of a checkpoint file, given the checkpoint prefix.\n\n For V1 checkpoint, simply returns the prefix itself (the data file). 
For V2,\n returns the pathname to the index file.\n\n Args:\n prefix: a string, the prefix of a checkpoint.\n format_version: the checkpoint format version that corresponds to the\n prefix.\n Returns:\n The pathname of a checkpoint file, taking into account the checkpoint\n format version.\n \"\"\"\n if format_version == saver_pb2.SaverDef.V2:\n return prefix + \".index\" # The index file identifies a checkpoint.\n return prefix # Just the data file.\n\n\n@tf_export(\"train.latest_checkpoint\")\ndef latest_checkpoint(checkpoint_dir, latest_filename=None):\n \"\"\"Finds the filename of latest saved checkpoint file.\n\n Args:\n checkpoint_dir: Directory where the variables were saved.\n latest_filename: Optional name for the protocol buffer file that\n contains the list of most recent checkpoint filenames.\n See the corresponding argument to `Saver.save()`.\n\n Returns:\n The full path to the latest checkpoint or `None` if no checkpoint was found.\n \"\"\"\n # Pick the latest checkpoint based on checkpoint state.\n ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)\n if ckpt and ckpt.model_checkpoint_path:\n # Look for either a V2 path or a V1 path, with priority for V2.\n v2_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,\n saver_pb2.SaverDef.V2)\n v1_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,\n saver_pb2.SaverDef.V1)\n if file_io.get_matching_files(v2_path) or file_io.get_matching_files(\n v1_path):\n return ckpt.model_checkpoint_path\n else:\n logging.error(\"Couldn't match files for checkpoint %s\",\n ckpt.model_checkpoint_path)\n return None\n\n\[email protected](\n date=None,\n instructions=\"Use standard file APIs to check for files with this prefix.\")\n@tf_export(v1=[\"train.checkpoint_exists\"])\ndef checkpoint_exists(checkpoint_prefix):\n \"\"\"Checks whether a V1 or V2 checkpoint exists with the specified prefix.\n\n This is the recommended way to check if a checkpoint exists, since it takes\n into account the naming difference between V1 and V2 formats.\n\n Args:\n checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking\n priority. Typically the result of `Saver.save()` or that of\n `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or\n V1/V2.\n Returns:\n A bool, true iff a checkpoint referred to by `checkpoint_prefix` exists.\n \"\"\"\n pathname = _prefix_to_checkpoint_path(checkpoint_prefix,\n saver_pb2.SaverDef.V2)\n if file_io.get_matching_files(pathname):\n return True\n elif file_io.get_matching_files(checkpoint_prefix):\n return True\n else:\n return False\n\n\[email protected](\n date=None,\n instructions=\"Use standard file utilities to get mtimes.\")\n@tf_export(v1=[\"train.get_checkpoint_mtimes\"])\ndef get_checkpoint_mtimes(checkpoint_prefixes):\n \"\"\"Returns the mtimes (modification timestamps) of the checkpoints.\n\n Globs for the checkpoints pointed to by `checkpoint_prefixes`. If the files\n exist, collect their mtime. 
Both V2 and V1 checkpoints are considered, in\n that priority.\n\n This is the recommended way to get the mtimes, since it takes into account\n the naming difference between V1 and V2 formats.\n\n Args:\n checkpoint_prefixes: a list of checkpoint paths, typically the results of\n `Saver.save()` or those of `tf.train.latest_checkpoint()`, regardless of\n sharded/non-sharded or V1/V2.\n Returns:\n A list of mtimes (in microseconds) of the found checkpoints.\n \"\"\"\n mtimes = []\n\n def match_maybe_append(pathname):\n fnames = file_io.get_matching_files(pathname)\n if fnames:\n mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1e9)\n return True\n return False\n\n for checkpoint_prefix in checkpoint_prefixes:\n # Tries V2's metadata file first.\n pathname = _prefix_to_checkpoint_path(checkpoint_prefix,\n saver_pb2.SaverDef.V2)\n if match_maybe_append(pathname):\n continue\n # Otherwise, tries V1, where the prefix is the complete pathname.\n match_maybe_append(checkpoint_prefix)\n\n return mtimes\n\n\[email protected](\n date=None,\n instructions=\"Use standard file APIs to delete files with this prefix.\")\n@tf_export(v1=[\"train.remove_checkpoint\"])\ndef remove_checkpoint(checkpoint_prefix,\n checkpoint_format_version=saver_pb2.SaverDef.V2,\n meta_graph_suffix=\"meta\"):\n \"\"\"Removes a checkpoint given by `checkpoint_prefix`.\n\n Args:\n checkpoint_prefix: The prefix of a V1 or V2 checkpoint. Typically the result\n of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of\n sharded/non-sharded or V1/V2.\n checkpoint_format_version: `SaverDef.CheckpointFormatVersion`, defaults to\n `SaverDef.V2`.\n meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.\n \"\"\"\n _delete_file_if_exists(\n meta_graph_filename(checkpoint_prefix, meta_graph_suffix))\n if checkpoint_format_version == saver_pb2.SaverDef.V2:\n # V2 has a metadata file and some data files.\n _delete_file_if_exists(checkpoint_prefix + \".index\")\n _delete_file_if_exists(checkpoint_prefix + \".data-?????-of-?????\")\n else:\n # V1, Legacy. Exact match on the data file.\n _delete_file_if_exists(checkpoint_prefix)\n\n\ndef _delete_file_if_exists(filespec):\n \"\"\"Deletes files matching `filespec`.\"\"\"\n for pathname in file_io.get_matching_files(filespec):\n file_io.delete_file(pathname)\n\n\ndef meta_graph_filename(checkpoint_filename, meta_graph_suffix=\"meta\"):\n \"\"\"Returns the meta graph filename.\n\n Args:\n checkpoint_filename: Name of the checkpoint file.\n meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.\n\n Returns:\n MetaGraph file name.\n \"\"\"\n # If the checkpoint_filename is sharded, the checkpoint_filename could\n # be of format model.ckpt-step#-?????-of-shard#. 
For example,\n # model.ckpt-123456-?????-of-00005, or model.ckpt-123456-00001-of-00002.\n basename = re.sub(r\"-[\\d\\?]+-of-\\d+$\", \"\", checkpoint_filename)\n suffixed_filename = \".\".join([basename, meta_graph_suffix])\n return suffixed_filename\n\n\n# TODO(allenl): Allow tf.keras.Model instances in the constructor directly?\n@tf_export(\"train.CheckpointManager\")\nclass CheckpointManager(object):\n \"\"\"Deletes old checkpoints.\n\n Example usage:\n ```python\n import tensorflow as tf\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\n manager = tf.contrib.checkpoint.CheckpointManager(\n checkpoint, directory=\"/tmp/model\", max_to_keep=5)\n status = checkpoint.restore(manager.latest_checkpoint)\n while True:\n # train\n manager.save()\n ```\n\n `CheckpointManager` preserves its own state across instantiations (see the\n `__init__` documentation for details). Only one should be active in a\n particular directory at a time.\n \"\"\"\n\n def __init__(self, checkpoint, directory,\n max_to_keep, keep_checkpoint_every_n_hours=None):\n \"\"\"Configure a `CheckpointManager` for use in `directory`.\n\n If a `CheckpointManager` was previously used in `directory`, its\n state will be restored. This includes the list of managed checkpoints and\n the timestamp bookkeeping necessary to support\n `keep_checkpoint_every_n_hours`. The behavior of the new `CheckpointManager`\n will be the same as the previous `CheckpointManager`, including cleaning up\n existing checkpoints if appropriate.\n\n Checkpoints are only considered for deletion just after a new checkpoint has\n been added. At that point, `max_to_keep` checkpoints will remain in an\n \"active set\". Once a checkpoint is preserved by\n `keep_checkpoint_every_n_hours` it will not be deleted by this\n `CheckpointManager` or any future `CheckpointManager` instantiated in\n `directory` (regardless of the new setting of\n `keep_checkpoint_every_n_hours`). The `max_to_keep` checkpoints in the\n active set may be deleted by this `CheckpointManager` or a future\n `CheckpointManager` instantiated in `directory` (subject to its\n `max_to_keep` and `keep_checkpoint_every_n_hours` settings).\n\n Args:\n checkpoint: The `tf.train.Checkpoint` instance to save and manage\n checkpoints for.\n directory: The path to a directory in which to write checkpoints. A\n special file named \"checkpoint\" is also written to this directory (in a\n human-readable text format) which contains the state of the\n `CheckpointManager`.\n max_to_keep: An integer, the number of checkpoints to keep. Unless\n preserved by `keep_checkpoint_every_n_hours`, checkpoints will be\n deleted from the active set, oldest first, until only `max_to_keep`\n checkpoints remain. If `None`, no checkpoints are deleted and everything\n stays in the active set. Note that `max_to_keep=None` will keep all\n checkpoint paths in memory and in the checkpoint state protocol buffer\n on disk.\n keep_checkpoint_every_n_hours: Upon removal from the active set, a\n checkpoint will be preserved if it has been at least\n `keep_checkpoint_every_n_hours` since the last preserved checkpoint. 
The\n        default setting of `None` does not preserve any checkpoints in this way.\n\n    Raises:\n      ValueError: If `max_to_keep` is not a positive integer.\n    \"\"\"\n    self._checkpoint = checkpoint\n    self._save_counter_assign = None\n    if max_to_keep is not None and max_to_keep <= 0:\n      raise ValueError(\n          (\"Expected a positive integer or `None` for `max_to_keep`, \"\n           \"got %d.\")\n          % (max_to_keep,))\n    self._max_to_keep = max_to_keep\n    self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours\n    self._directory = directory\n    self._checkpoint_prefix = os.path.join(directory, \"ckpt\")\n    recovered_state = get_checkpoint_state(directory)\n    current_clock = time.time()\n    self._maybe_delete = collections.OrderedDict()\n    if recovered_state is None:\n      self._latest_checkpoint = None\n      # Set the clock back slightly to avoid race conditions when quickly\n      # re-creating a CheckpointManager.\n      self._last_preserved_timestamp = current_clock - 1.\n    else:\n      self._latest_checkpoint = recovered_state.model_checkpoint_path\n      self._last_preserved_timestamp = recovered_state.last_preserved_timestamp\n      if current_clock < self._last_preserved_timestamp:\n        # Time seems to have reversed itself. In addition to this warning, we'll\n        # min() saved checkpoint timestamps with the current time to ensure that\n        # old checkpoints don't get deleted accidentally.\n        logging.warning(\n            (\"time.time() returned a value %f seconds behind the last \"\n             \"preserved checkpoint timestamp.\")\n            % (self._last_preserved_timestamp - current_clock,))\n        self._last_preserved_timestamp = current_clock\n      all_timestamps = recovered_state.all_model_checkpoint_timestamps\n      all_paths = recovered_state.all_model_checkpoint_paths\n      del recovered_state  # Uses modified values from now on\n      if not all_timestamps:\n        all_timestamps = [self._last_preserved_timestamp] * len(all_paths)\n\n      for filename, timestamp in zip(all_paths, all_timestamps):\n        timestamp = min(timestamp, current_clock)\n        if timestamp > self._last_preserved_timestamp:\n          self._maybe_delete[filename] = timestamp\n\n  @property\n  def latest_checkpoint(self):\n    \"\"\"The prefix of the most recent checkpoint in `directory`.\n\n    Equivalent to `tf.train.latest_checkpoint(directory)` where `directory` is\n    the constructor argument to `CheckpointManager`.\n\n    Suitable for passing to `tf.train.Checkpoint.restore` to resume training.\n\n    Returns:\n      The checkpoint prefix. 
If there are no checkpoints, returns `None`.\n \"\"\"\n return self._latest_checkpoint\n\n @property\n def checkpoints(self):\n \"\"\"A list of managed checkpoints.\n\n Note that checkpoints saved due to `keep_checkpoint_every_n_hours` will not\n show up in this list (to avoid ever-growing filename lists).\n\n Returns:\n A list of filenames, sorted from oldest to newest.\n \"\"\"\n return list(self._maybe_delete.keys())\n\n def _sweep(self):\n \"\"\"Deletes or preserves managed checkpoints.\"\"\"\n if not self._max_to_keep:\n # Does not update self._last_preserved_timestamp, since everything is kept\n # in the active set.\n return\n while len(self._maybe_delete) > self._max_to_keep:\n filename, timestamp = self._maybe_delete.popitem(last=False)\n # Even if we're keeping this checkpoint due to\n # keep_checkpoint_every_n_hours, we won't reference it to avoid\n # infinitely-growing CheckpointState protos.\n if (self._keep_checkpoint_every_n_hours\n and (timestamp - self._keep_checkpoint_every_n_hours * 3600.\n >= self._last_preserved_timestamp)):\n self._last_preserved_timestamp = timestamp\n continue\n remove_checkpoint(filename)\n\n def _record_state(self):\n \"\"\"Saves the `CheckpointManager`'s state in `directory`.\"\"\"\n filenames, timestamps = zip(*self._maybe_delete.items())\n update_checkpoint_state_internal(\n self._directory,\n model_checkpoint_path=self.latest_checkpoint,\n all_model_checkpoint_paths=filenames,\n all_model_checkpoint_timestamps=timestamps,\n last_preserved_timestamp=self._last_preserved_timestamp,\n save_relative_paths=True)\n\n @property\n def _prefix(self):\n \"\"\"A common prefix for all checkpoints saved with this manager.\n\n For example, if `directory` (a constructor argument) were `\"/tmp/tf-model\"`,\n `prefix` would be `\"/tmp/tf-model/ckpt\"` and checkpoints would generally be\n numbered `\"/tmp/tf-model/ckpt-1\"`, `\"/tmp/tf-model/ckpt-2\"`, and so on. Each\n checkpoint has several associated files\n (e.g. `\"/tmp/tf-model/ckpt-2.index\"`).\n\n Returns:\n A string prefix.\n \"\"\"\n return self._checkpoint_prefix\n\n def save(self, checkpoint_number=None):\n \"\"\"Creates a new checkpoint and manages it.\n\n Args:\n checkpoint_number: An optional integer, or an integer-dtype `Variable` or\n `Tensor`, used to number the checkpoint. If `None` (default),\n checkpoints are numbered using `checkpoint.save_counter`. Even if\n `checkpoint_number` is provided, `save_counter` is still incremented. A\n user-provided `checkpoint_number` is not incremented even if it is a\n `Variable`.\n\n Returns:\n The path to the new checkpoint. 
It is also recorded in the `checkpoints`\n      and `latest_checkpoint` properties.\n    \"\"\"\n    # Save counter logic duplicated from tf.train.Checkpoint, soon to diverge\n    # slightly with a custom numbering option.\n    if context.executing_eagerly():\n      save_counter = self._checkpoint.save_counter\n      save_counter.assign_add(1)\n      session = None\n    else:\n      session = ops.get_default_session()\n\n      def _initializing_creator(next_creator, **kwargs):\n        \"\"\"Initialize the save counter if it has been newly created.\"\"\"\n        v = next_creator(**kwargs)\n        session.run(v.initializer)\n        return v\n\n      with variable_scope.variable_creator_scope(_initializing_creator):\n        save_counter = self._checkpoint.save_counter\n      if self._save_counter_assign is None:\n        self._save_counter_assign = save_counter.assign_add(1, read_value=False)\n      session.run(self._save_counter_assign)\n    if checkpoint_number is None:\n      checkpoint_number = save_counter\n    if not isinstance(checkpoint_number, compat.integral_types):\n      checkpoint_number = training_util.global_step(\n          sess=session, global_step_tensor=checkpoint_number)\n    prefix = \"%s-%d\" % (self._prefix, checkpoint_number)\n    save_path = self._checkpoint.write(prefix)\n    timestamp = time.time()\n    # If this is an overwritten checkpoint we were previously tracking, delete\n    # and reinsert it to make sure it goes to the end of the queue.\n    if save_path in self._maybe_delete:\n      del self._maybe_delete[save_path]\n    self._maybe_delete[save_path] = timestamp\n    self._latest_checkpoint = save_path\n    self._sweep()\n    self._record_state()\n    return save_path\n",
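"# --- Illustrative usage sketch of tf.train.CheckpointManager (added note,\n# not original repo code). A minimal end-to-end run of the manager defined\n# above, assuming TF >= 1.13 with eager execution enabled; the directory and\n# the `step` variable are hypothetical stand-ins for real training state.\nimport tensorflow as tf\n\nstep = tf.Variable(0, dtype=tf.int64)  # trackable state to checkpoint\nckpt = tf.train.Checkpoint(step=step)\nmanager = tf.train.CheckpointManager(\n    ckpt, directory=\"/tmp/ckpt_demo\", max_to_keep=3)\n\n# `latest_checkpoint` is None on a fresh directory, so restore() is a no-op\n# the first time through; on later runs it resumes from the newest prefix.\nckpt.restore(manager.latest_checkpoint)\nfor _ in range(5):\n  step.assign_add(1)\n  save_path = manager.save()  # writes e.g. /tmp/ckpt_demo/ckpt-<save_counter>\n\n# Only the `max_to_keep` newest prefixes stay in the active set; older ones\n# were deleted by _sweep(), since keep_checkpoint_every_n_hours was left at\n# None.\nprint(manager.checkpoints)  # the three newest ckpt-N prefixes\n",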
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Library of dtypes (Tensor element types).\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom six.moves import builtins\n\nfrom tensorflow.core.framework import types_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.util.tf_export import tf_export\n\n_np_bfloat16 = pywrap_tensorflow.TF_bfloat16_type()\n\n\n@tf_export(\"dtypes.DType\", \"DType\")\nclass DType(object):\n \"\"\"Represents the type of the elements in a `Tensor`.\n\n The following `DType` objects are defined:\n\n * `tf.float16`: 16-bit half-precision floating-point.\n * `tf.float32`: 32-bit single-precision floating-point.\n * `tf.float64`: 64-bit double-precision floating-point.\n * `tf.bfloat16`: 16-bit truncated floating-point.\n * `tf.complex64`: 64-bit single-precision complex.\n * `tf.complex128`: 128-bit double-precision complex.\n * `tf.int8`: 8-bit signed integer.\n * `tf.uint8`: 8-bit unsigned integer.\n * `tf.uint16`: 16-bit unsigned integer.\n * `tf.uint32`: 32-bit unsigned integer.\n * `tf.uint64`: 64-bit unsigned integer.\n * `tf.int16`: 16-bit signed integer.\n * `tf.int32`: 32-bit signed integer.\n * `tf.int64`: 64-bit signed integer.\n * `tf.bool`: Boolean.\n * `tf.string`: String.\n * `tf.qint8`: Quantized 8-bit signed integer.\n * `tf.quint8`: Quantized 8-bit unsigned integer.\n * `tf.qint16`: Quantized 16-bit signed integer.\n * `tf.quint16`: Quantized 16-bit unsigned integer.\n * `tf.qint32`: Quantized 32-bit signed integer.\n * `tf.resource`: Handle to a mutable resource.\n * `tf.variant`: Values of arbitrary types.\n\n In addition, variants of these types with the `_ref` suffix are\n defined for reference-typed tensors.\n\n The `tf.as_dtype()` function converts numpy types and string type\n names to a `DType` object.\n \"\"\"\n\n def __init__(self, type_enum):\n \"\"\"Creates a new `DataType`.\n\n NOTE(mrry): In normal circumstances, you should not need to\n construct a `DataType` object directly. 
Instead, use the\n    `tf.as_dtype()` function.\n\n    Args:\n      type_enum: A `types_pb2.DataType` enum value.\n\n    Raises:\n      TypeError: If `type_enum` is not a valid `types_pb2.DataType`.\n\n    \"\"\"\n    # TODO(mrry): Make the necessary changes (using __new__) to ensure\n    # that calling this returns one of the interned values.\n    type_enum = int(type_enum)\n    if (type_enum not in types_pb2.DataType.values() or\n        type_enum == types_pb2.DT_INVALID):\n      raise TypeError(\n          \"type_enum is not a valid types_pb2.DataType: %s\" % type_enum)\n    self._type_enum = type_enum\n\n  @property\n  def _is_ref_dtype(self):\n    \"\"\"Returns `True` if this `DType` represents a reference type.\"\"\"\n    return self._type_enum > 100\n\n  @property\n  def _as_ref(self):\n    \"\"\"Returns a reference `DType` based on this `DType`.\"\"\"\n    if self._is_ref_dtype:\n      return self\n    else:\n      return _INTERN_TABLE[self._type_enum + 100]\n\n  @property\n  def base_dtype(self):\n    \"\"\"Returns a non-reference `DType` based on this `DType`.\"\"\"\n    if self._is_ref_dtype:\n      return _INTERN_TABLE[self._type_enum - 100]\n    else:\n      return self\n\n  @property\n  def real_dtype(self):\n    \"\"\"Returns the dtype corresponding to this dtype's real part.\"\"\"\n    base = self.base_dtype\n    if base == complex64:\n      return float32\n    elif base == complex128:\n      return float64\n    else:\n      return self\n\n  @property\n  def is_numpy_compatible(self):\n    return self._type_enum not in _NUMPY_INCOMPATIBLE\n\n  @property\n  def as_numpy_dtype(self):\n    \"\"\"Returns a `numpy.dtype` based on this `DType`.\"\"\"\n    return _TF_TO_NP[self._type_enum]\n\n  @property\n  def as_datatype_enum(self):\n    \"\"\"Returns a `types_pb2.DataType` enum value based on this `DType`.\"\"\"\n    return self._type_enum\n\n  @property\n  def is_bool(self):\n    \"\"\"Returns whether this is a boolean data type.\"\"\"\n    return self.base_dtype == bool\n\n  @property\n  def is_integer(self):\n    \"\"\"Returns whether this is a (non-quantized) integer type.\"\"\"\n    return (self.is_numpy_compatible and not self.is_quantized and\n            np.issubdtype(self.as_numpy_dtype, np.integer))\n\n  @property\n  def is_floating(self):\n    \"\"\"Returns whether this is a (non-quantized, real) floating point type.\"\"\"\n    return ((self.is_numpy_compatible and\n             np.issubdtype(self.as_numpy_dtype, np.floating)) or\n            self.base_dtype == bfloat16)\n\n  @property\n  def is_complex(self):\n    \"\"\"Returns whether this is a complex floating point type.\"\"\"\n    return self.base_dtype in (complex64, complex128)\n\n  @property\n  def is_quantized(self):\n    \"\"\"Returns whether this is a quantized data type.\"\"\"\n    return self.base_dtype in _QUANTIZED_DTYPES_NO_REF\n\n  @property\n  def is_unsigned(self):\n    \"\"\"Returns whether this type is unsigned.\n\n    Non-numeric, unordered, and quantized types are not considered unsigned, and\n    this function returns `False`.\n\n    Returns:\n      Whether a `DType` is unsigned.\n    \"\"\"\n    try:\n      return self.min == 0\n    except TypeError:\n      return False\n\n  @property\n  def min(self):\n    \"\"\"Returns the minimum representable value in this data type.\n\n    Raises:\n      TypeError: if this is a non-numeric, unordered, or quantized type.\n\n    \"\"\"\n    if (self.is_quantized or\n        self.base_dtype in (bool, string, complex64, complex128)):\n      raise TypeError(\"Cannot find minimum value of %s.\" % self)\n\n    # There is no simple way to get the min value of a dtype, so we have to\n    # check float and int types separately.\n    try:\n      return np.finfo(self.as_numpy_dtype()).min\n    except:  # bare except as possible raises by finfo not documented\n      try:\n        return 
np.iinfo(self.as_numpy_dtype()).min\n except:\n if self.base_dtype == bfloat16:\n return _np_bfloat16(float.fromhex(\"-0x1.FEp127\"))\n raise TypeError(\"Cannot find minimum value of %s.\" % self)\n\n @property\n def max(self):\n \"\"\"Returns the maximum representable value in this data type.\n\n Raises:\n TypeError: if this is a non-numeric, unordered, or quantized type.\n\n \"\"\"\n if (self.is_quantized or\n self.base_dtype in (bool, string, complex64, complex128)):\n raise TypeError(\"Cannot find maximum value of %s.\" % self)\n\n # there is no simple way to get the max value of a dtype, we have to check\n # float and int types separately\n try:\n return np.finfo(self.as_numpy_dtype()).max\n except: # bare except as possible raises by finfo not documented\n try:\n return np.iinfo(self.as_numpy_dtype()).max\n except:\n if self.base_dtype == bfloat16:\n return _np_bfloat16(float.fromhex(\"0x1.FEp127\"))\n raise TypeError(\"Cannot find maximum value of %s.\" % self)\n\n @property\n def limits(self, clip_negative=True):\n \"\"\"Return intensity limits, i.e. (min, max) tuple, of the dtype.\n Args:\n clip_negative : bool, optional\n If True, clip the negative range (i.e. return 0 for min intensity)\n even if the image dtype allows negative values.\n Returns\n min, max : tuple\n Lower and upper intensity limits.\n \"\"\"\n min, max = dtype_range[self.as_numpy_dtype] # pylint: disable=redefined-builtin\n if clip_negative:\n min = 0 # pylint: disable=redefined-builtin\n return min, max\n\n def is_compatible_with(self, other):\n \"\"\"Returns True if the `other` DType will be converted to this DType.\n\n The conversion rules are as follows:\n\n ```python\n DType(T) .is_compatible_with(DType(T)) == True\n DType(T) .is_compatible_with(DType(T).as_ref) == True\n DType(T).as_ref.is_compatible_with(DType(T)) == False\n DType(T).as_ref.is_compatible_with(DType(T).as_ref) == True\n ```\n\n Args:\n other: A `DType` (or object that may be converted to a `DType`).\n\n Returns:\n True if a Tensor of the `other` `DType` will be implicitly converted to\n this `DType`.\n \"\"\"\n other = as_dtype(other)\n return self._type_enum in (other.as_datatype_enum,\n other.base_dtype.as_datatype_enum)\n\n def __eq__(self, other):\n \"\"\"Returns True iff this DType refers to the same type as `other`.\"\"\"\n if other is None:\n return False\n try:\n dtype = as_dtype(other).as_datatype_enum\n return self._type_enum == dtype # pylint: disable=protected-access\n except TypeError:\n return False\n\n def __ne__(self, other):\n \"\"\"Returns True iff self != other.\"\"\"\n return not self.__eq__(other)\n\n @property\n def name(self):\n \"\"\"Returns the string name for this `DType`.\"\"\"\n return _TYPE_TO_STRING[self._type_enum]\n\n def __int__(self):\n return self._type_enum\n\n def __str__(self):\n return \"<dtype: %r>\" % self.name\n\n def __repr__(self):\n return \"tf.\" + self.name\n\n def __hash__(self):\n return self._type_enum\n\n def __reduce__(self):\n return as_dtype, (self.name,)\n\n @property\n def size(self):\n if (self._type_enum == types_pb2.DT_VARIANT or\n self._type_enum == types_pb2.DT_RESOURCE):\n return 1\n return np.dtype(self.as_numpy_dtype).itemsize\n\n\n# Define data type range of numpy dtype\ndtype_range = {\n np.bool_: (False, True),\n np.bool8: (False, True),\n np.uint8: (0, 255),\n np.uint16: (0, 65535),\n np.int8: (-128, 127),\n np.int16: (-32768, 32767),\n np.int64: (-2**63, 2**63 - 1),\n np.uint64: (0, 2**64 - 1),\n np.int32: (-2**31, 2**31 - 1),\n np.uint32: (0, 2**32 - 1),\n np.float32: 
(-1, 1),\n    np.float64: (-1, 1)\n}\n\n# Define standard wrappers for the types_pb2.DataType enum.\nresource = DType(types_pb2.DT_RESOURCE)\ntf_export(\"dtypes.resource\", \"resource\").export_constant(__name__, \"resource\")\nvariant = DType(types_pb2.DT_VARIANT)\ntf_export(\"dtypes.variant\", \"variant\").export_constant(__name__, \"variant\")\nfloat16 = DType(types_pb2.DT_HALF)\ntf_export(\"dtypes.float16\", \"float16\").export_constant(__name__, \"float16\")\nhalf = float16\ntf_export(\"dtypes.half\", \"half\").export_constant(__name__, \"half\")\nfloat32 = DType(types_pb2.DT_FLOAT)\ntf_export(\"dtypes.float32\", \"float32\").export_constant(__name__, \"float32\")\nfloat64 = DType(types_pb2.DT_DOUBLE)\ntf_export(\"dtypes.float64\", \"float64\").export_constant(__name__, \"float64\")\ndouble = float64\ntf_export(\"dtypes.double\", \"double\").export_constant(__name__, \"double\")\nint32 = DType(types_pb2.DT_INT32)\ntf_export(\"dtypes.int32\", \"int32\").export_constant(__name__, \"int32\")\nuint8 = DType(types_pb2.DT_UINT8)\ntf_export(\"dtypes.uint8\", \"uint8\").export_constant(__name__, \"uint8\")\nuint16 = DType(types_pb2.DT_UINT16)\ntf_export(\"dtypes.uint16\", \"uint16\").export_constant(__name__, \"uint16\")\nuint32 = DType(types_pb2.DT_UINT32)\ntf_export(\"dtypes.uint32\", \"uint32\").export_constant(__name__, \"uint32\")\nuint64 = DType(types_pb2.DT_UINT64)\ntf_export(\"dtypes.uint64\", \"uint64\").export_constant(__name__, \"uint64\")\nint16 = DType(types_pb2.DT_INT16)\ntf_export(\"dtypes.int16\", \"int16\").export_constant(__name__, \"int16\")\nint8 = DType(types_pb2.DT_INT8)\ntf_export(\"dtypes.int8\", \"int8\").export_constant(__name__, \"int8\")\nstring = DType(types_pb2.DT_STRING)\ntf_export(\"dtypes.string\", \"string\").export_constant(__name__, \"string\")\ncomplex64 = DType(types_pb2.DT_COMPLEX64)\ntf_export(\"dtypes.complex64\", \"complex64\").export_constant(\n    __name__, \"complex64\")\ncomplex128 = DType(types_pb2.DT_COMPLEX128)\ntf_export(\"dtypes.complex128\", \"complex128\").export_constant(\n    __name__, \"complex128\")\nint64 = DType(types_pb2.DT_INT64)\ntf_export(\"dtypes.int64\", \"int64\").export_constant(__name__, \"int64\")\nbool = DType(types_pb2.DT_BOOL)  # pylint: disable=redefined-builtin\ntf_export(\"dtypes.bool\", \"bool\").export_constant(__name__, \"bool\")\nqint8 = DType(types_pb2.DT_QINT8)\ntf_export(\"dtypes.qint8\", \"qint8\").export_constant(__name__, \"qint8\")\nquint8 = DType(types_pb2.DT_QUINT8)\ntf_export(\"dtypes.quint8\", \"quint8\").export_constant(__name__, \"quint8\")\nqint16 = DType(types_pb2.DT_QINT16)\ntf_export(\"dtypes.qint16\", \"qint16\").export_constant(__name__, \"qint16\")\nquint16 = DType(types_pb2.DT_QUINT16)\ntf_export(\"dtypes.quint16\", \"quint16\").export_constant(__name__, \"quint16\")\nqint32 = DType(types_pb2.DT_QINT32)\ntf_export(\"dtypes.qint32\", \"qint32\").export_constant(__name__, \"qint32\")\nresource_ref = DType(types_pb2.DT_RESOURCE_REF)\nvariant_ref = DType(types_pb2.DT_VARIANT_REF)\nbfloat16 = DType(types_pb2.DT_BFLOAT16)\ntf_export(\"dtypes.bfloat16\", \"bfloat16\").export_constant(__name__, \"bfloat16\")\nfloat16_ref = DType(types_pb2.DT_HALF_REF)\nhalf_ref = float16_ref\nfloat32_ref = DType(types_pb2.DT_FLOAT_REF)\nfloat64_ref = DType(types_pb2.DT_DOUBLE_REF)\ndouble_ref = float64_ref\nint32_ref = DType(types_pb2.DT_INT32_REF)\nuint32_ref = DType(types_pb2.DT_UINT32_REF)\nuint8_ref = DType(types_pb2.DT_UINT8_REF)\nuint16_ref = DType(types_pb2.DT_UINT16_REF)\nint16_ref = 
DType(types_pb2.DT_INT16_REF)\nint8_ref = DType(types_pb2.DT_INT8_REF)\nstring_ref = DType(types_pb2.DT_STRING_REF)\ncomplex64_ref = DType(types_pb2.DT_COMPLEX64_REF)\ncomplex128_ref = DType(types_pb2.DT_COMPLEX128_REF)\nint64_ref = DType(types_pb2.DT_INT64_REF)\nuint64_ref = DType(types_pb2.DT_UINT64_REF)\nbool_ref = DType(types_pb2.DT_BOOL_REF)\nqint8_ref = DType(types_pb2.DT_QINT8_REF)\nquint8_ref = DType(types_pb2.DT_QUINT8_REF)\nqint16_ref = DType(types_pb2.DT_QINT16_REF)\nquint16_ref = DType(types_pb2.DT_QUINT16_REF)\nqint32_ref = DType(types_pb2.DT_QINT32_REF)\nbfloat16_ref = DType(types_pb2.DT_BFLOAT16_REF)\n\n_NUMPY_INCOMPATIBLE = frozenset([\n types_pb2.DT_VARIANT, types_pb2.DT_VARIANT_REF, types_pb2.DT_RESOURCE,\n types_pb2.DT_RESOURCE_REF\n])\n\n# Maintain an intern table so that we don't have to create a large\n# number of small objects.\n_INTERN_TABLE = {\n types_pb2.DT_HALF: float16,\n types_pb2.DT_FLOAT: float32,\n types_pb2.DT_DOUBLE: float64,\n types_pb2.DT_INT32: int32,\n types_pb2.DT_UINT8: uint8,\n types_pb2.DT_UINT16: uint16,\n types_pb2.DT_UINT32: uint32,\n types_pb2.DT_UINT64: uint64,\n types_pb2.DT_INT16: int16,\n types_pb2.DT_INT8: int8,\n types_pb2.DT_STRING: string,\n types_pb2.DT_COMPLEX64: complex64,\n types_pb2.DT_COMPLEX128: complex128,\n types_pb2.DT_INT64: int64,\n types_pb2.DT_BOOL: bool,\n types_pb2.DT_QINT8: qint8,\n types_pb2.DT_QUINT8: quint8,\n types_pb2.DT_QINT16: qint16,\n types_pb2.DT_QUINT16: quint16,\n types_pb2.DT_QINT32: qint32,\n types_pb2.DT_BFLOAT16: bfloat16,\n types_pb2.DT_RESOURCE: resource,\n types_pb2.DT_VARIANT: variant,\n types_pb2.DT_HALF_REF: float16_ref,\n types_pb2.DT_FLOAT_REF: float32_ref,\n types_pb2.DT_DOUBLE_REF: float64_ref,\n types_pb2.DT_INT32_REF: int32_ref,\n types_pb2.DT_UINT32_REF: uint32_ref,\n types_pb2.DT_UINT8_REF: uint8_ref,\n types_pb2.DT_UINT16_REF: uint16_ref,\n types_pb2.DT_INT16_REF: int16_ref,\n types_pb2.DT_INT8_REF: int8_ref,\n types_pb2.DT_STRING_REF: string_ref,\n types_pb2.DT_COMPLEX64_REF: complex64_ref,\n types_pb2.DT_COMPLEX128_REF: complex128_ref,\n types_pb2.DT_INT64_REF: int64_ref,\n types_pb2.DT_UINT64_REF: uint64_ref,\n types_pb2.DT_BOOL_REF: bool_ref,\n types_pb2.DT_QINT8_REF: qint8_ref,\n types_pb2.DT_QUINT8_REF: quint8_ref,\n types_pb2.DT_QINT16_REF: qint16_ref,\n types_pb2.DT_QUINT16_REF: quint16_ref,\n types_pb2.DT_QINT32_REF: qint32_ref,\n types_pb2.DT_BFLOAT16_REF: bfloat16_ref,\n types_pb2.DT_RESOURCE_REF: resource_ref,\n types_pb2.DT_VARIANT_REF: variant_ref,\n}\n\n# Standard mappings between types_pb2.DataType values and string names.\n_TYPE_TO_STRING = {\n types_pb2.DT_HALF: \"float16\",\n types_pb2.DT_FLOAT: \"float32\",\n types_pb2.DT_DOUBLE: \"float64\",\n types_pb2.DT_INT32: \"int32\",\n types_pb2.DT_UINT8: \"uint8\",\n types_pb2.DT_UINT16: \"uint16\",\n types_pb2.DT_UINT32: \"uint32\",\n types_pb2.DT_UINT64: \"uint64\",\n types_pb2.DT_INT16: \"int16\",\n types_pb2.DT_INT8: \"int8\",\n types_pb2.DT_STRING: \"string\",\n types_pb2.DT_COMPLEX64: \"complex64\",\n types_pb2.DT_COMPLEX128: \"complex128\",\n types_pb2.DT_INT64: \"int64\",\n types_pb2.DT_BOOL: \"bool\",\n types_pb2.DT_QINT8: \"qint8\",\n types_pb2.DT_QUINT8: \"quint8\",\n types_pb2.DT_QINT16: \"qint16\",\n types_pb2.DT_QUINT16: \"quint16\",\n types_pb2.DT_QINT32: \"qint32\",\n types_pb2.DT_BFLOAT16: \"bfloat16\",\n types_pb2.DT_RESOURCE: \"resource\",\n types_pb2.DT_VARIANT: \"variant\",\n types_pb2.DT_HALF_REF: \"float16_ref\",\n types_pb2.DT_FLOAT_REF: \"float32_ref\",\n types_pb2.DT_DOUBLE_REF: \"float64_ref\",\n 
types_pb2.DT_INT32_REF: \"int32_ref\",\n types_pb2.DT_UINT32_REF: \"uint32_ref\",\n types_pb2.DT_UINT8_REF: \"uint8_ref\",\n types_pb2.DT_UINT16_REF: \"uint16_ref\",\n types_pb2.DT_INT16_REF: \"int16_ref\",\n types_pb2.DT_INT8_REF: \"int8_ref\",\n types_pb2.DT_STRING_REF: \"string_ref\",\n types_pb2.DT_COMPLEX64_REF: \"complex64_ref\",\n types_pb2.DT_COMPLEX128_REF: \"complex128_ref\",\n types_pb2.DT_INT64_REF: \"int64_ref\",\n types_pb2.DT_UINT64_REF: \"uint64_ref\",\n types_pb2.DT_BOOL_REF: \"bool_ref\",\n types_pb2.DT_QINT8_REF: \"qint8_ref\",\n types_pb2.DT_QUINT8_REF: \"quint8_ref\",\n types_pb2.DT_QINT16_REF: \"qint16_ref\",\n types_pb2.DT_QUINT16_REF: \"quint16_ref\",\n types_pb2.DT_QINT32_REF: \"qint32_ref\",\n types_pb2.DT_BFLOAT16_REF: \"bfloat16_ref\",\n types_pb2.DT_RESOURCE_REF: \"resource_ref\",\n types_pb2.DT_VARIANT_REF: \"variant_ref\",\n}\n_STRING_TO_TF = {\n value: _INTERN_TABLE[key]\n for key, value in _TYPE_TO_STRING.items()\n}\n# Add non-canonical aliases.\n_STRING_TO_TF[\"half\"] = float16\n_STRING_TO_TF[\"half_ref\"] = float16_ref\n_STRING_TO_TF[\"float\"] = float32\n_STRING_TO_TF[\"float_ref\"] = float32_ref\n_STRING_TO_TF[\"double\"] = float64\n_STRING_TO_TF[\"double_ref\"] = float64_ref\n\n# Numpy representation for quantized dtypes.\n#\n# These are magic strings that are used in the swig wrapper to identify\n# quantized types.\n# TODO(mrry,keveman): Investigate Numpy type registration to replace this\n# hard-coding of names.\n_np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n_np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n_np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n_np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n_np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n\n# _np_bfloat16 is defined by a module import.\n\n# Custom struct dtype for directly-fed ResourceHandles of supported type(s).\nnp_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n\n# Standard mappings between types_pb2.DataType values and numpy.dtypes.\n_NP_TO_TF = frozenset([\n (np.float16, float16),\n (np.float32, float32),\n (np.float64, float64),\n (np.int32, int32),\n (np.int64, int64),\n (np.uint8, uint8),\n (np.uint16, uint16),\n (np.uint32, uint32),\n (np.uint64, uint64),\n (np.int16, int16),\n (np.int8, int8),\n (np.complex64, complex64),\n (np.complex128, complex128),\n (np.object_, string),\n (np.bool_, bool),\n (_np_qint8, qint8),\n (_np_quint8, quint8),\n (_np_qint16, qint16),\n (_np_quint16, quint16),\n (_np_qint32, qint32),\n (_np_bfloat16, bfloat16),\n])\n_TF_TO_NP = {\n types_pb2.DT_HALF:\n np.float16,\n types_pb2.DT_FLOAT:\n np.float32,\n types_pb2.DT_DOUBLE:\n np.float64,\n types_pb2.DT_INT32:\n np.int32,\n types_pb2.DT_UINT8:\n np.uint8,\n types_pb2.DT_UINT16:\n np.uint16,\n types_pb2.DT_UINT32:\n np.uint32,\n types_pb2.DT_UINT64:\n np.uint64,\n types_pb2.DT_INT16:\n np.int16,\n types_pb2.DT_INT8:\n np.int8,\n # NOTE(touts): For strings we use np.object as it supports variable length\n # strings.\n types_pb2.DT_STRING:\n np.object,\n types_pb2.DT_COMPLEX64:\n np.complex64,\n types_pb2.DT_COMPLEX128:\n np.complex128,\n types_pb2.DT_INT64:\n np.int64,\n types_pb2.DT_BOOL:\n np.bool,\n types_pb2.DT_QINT8:\n _np_qint8,\n types_pb2.DT_QUINT8:\n _np_quint8,\n types_pb2.DT_QINT16:\n _np_qint16,\n types_pb2.DT_QUINT16:\n _np_quint16,\n types_pb2.DT_QINT32:\n _np_qint32,\n types_pb2.DT_BFLOAT16:\n _np_bfloat16,\n\n # Ref types\n types_pb2.DT_HALF_REF:\n np.float16,\n types_pb2.DT_FLOAT_REF:\n np.float32,\n types_pb2.DT_DOUBLE_REF:\n np.float64,\n 
types_pb2.DT_INT32_REF:\n np.int32,\n types_pb2.DT_UINT32_REF:\n np.uint32,\n types_pb2.DT_UINT8_REF:\n np.uint8,\n types_pb2.DT_UINT16_REF:\n np.uint16,\n types_pb2.DT_INT16_REF:\n np.int16,\n types_pb2.DT_INT8_REF:\n np.int8,\n types_pb2.DT_STRING_REF:\n np.object,\n types_pb2.DT_COMPLEX64_REF:\n np.complex64,\n types_pb2.DT_COMPLEX128_REF:\n np.complex128,\n types_pb2.DT_INT64_REF:\n np.int64,\n types_pb2.DT_UINT64_REF:\n np.uint64,\n types_pb2.DT_BOOL_REF:\n np.bool,\n types_pb2.DT_QINT8_REF:\n _np_qint8,\n types_pb2.DT_QUINT8_REF:\n _np_quint8,\n types_pb2.DT_QINT16_REF:\n _np_qint16,\n types_pb2.DT_QUINT16_REF:\n _np_quint16,\n types_pb2.DT_QINT32_REF:\n _np_qint32,\n types_pb2.DT_BFLOAT16_REF:\n _np_bfloat16,\n}\n\n_QUANTIZED_DTYPES_NO_REF = frozenset([qint8, quint8, qint16, quint16, qint32])\n_QUANTIZED_DTYPES_REF = frozenset(\n [qint8_ref, quint8_ref, qint16_ref, quint16_ref, qint32_ref])\nQUANTIZED_DTYPES = _QUANTIZED_DTYPES_REF.union(_QUANTIZED_DTYPES_NO_REF)\ntf_export(\n \"dtypes.QUANTIZED_DTYPES\",\n v1=[\"dtypes.QUANTIZED_DTYPES\", \"QUANTIZED_DTYPES\"]).export_constant(\n __name__, \"QUANTIZED_DTYPES\")\n\n_PYTHON_TO_TF = {\n builtins.float: float32,\n builtins.bool: bool,\n builtins.object: string\n}\n\n\n@tf_export(\"dtypes.as_dtype\", \"as_dtype\")\ndef as_dtype(type_value):\n \"\"\"Converts the given `type_value` to a `DType`.\n\n Args:\n type_value: A value that can be converted to a `tf.DType` object. This may\n currently be a `tf.DType` object, a [`DataType`\n enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),\n a string type name, or a `numpy.dtype`.\n\n Returns:\n A `DType` corresponding to `type_value`.\n\n Raises:\n TypeError: If `type_value` cannot be converted to a `DType`.\n \"\"\"\n if isinstance(type_value, DType):\n return type_value\n\n try:\n return _INTERN_TABLE[type_value]\n except KeyError:\n pass\n\n try:\n return _STRING_TO_TF[type_value]\n except KeyError:\n pass\n\n try:\n return _PYTHON_TO_TF[type_value]\n except KeyError:\n pass\n\n if isinstance(type_value, np.dtype):\n # The numpy dtype for strings is variable length. We can not compare\n # dtype with a single constant (np.string does not exist) to decide\n # dtype is a \"string\" type. We need to compare the dtype.type to be\n # sure it's a string type.\n if type_value.type == np.string_ or type_value.type == np.unicode_:\n return string\n\n if isinstance(type_value, (type, np.dtype)):\n for key, val in _NP_TO_TF:\n try:\n if key == type_value:\n return val\n except TypeError as e:\n raise TypeError(\"Cannot convert {} to a dtype. {}\".format(\n type_value, e))\n\n raise TypeError(\"Cannot convert value %r to a TensorFlow DType.\" % type_value)\n",
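"# --- Illustrative sketch of the dtype conversion tables above (added note,\n# not original repo code). `as_dtype` accepts the interned DType, a string\n# name, a Python builtin type, or a numpy dtype, and always returns the\n# interned singleton. Assumes a TensorFlow build matching this module.\nimport numpy as np\nimport tensorflow as tf\n\nassert tf.as_dtype(\"float32\") is tf.float32  # via _STRING_TO_TF\nassert tf.as_dtype(np.int64) is tf.int64     # via the _NP_TO_TF pairs\nassert tf.as_dtype(float) is tf.float32      # via _PYTHON_TO_TF\nprint(tf.complex64.real_dtype)               # float32\nprint(tf.float16.min, tf.float16.max)        # -65504.0 65504.0, via np.finfo\nprint(tf.int32.is_integer)                   # True\nprint(tf.variant.is_numpy_compatible)        # False: DT_VARIANT is excluded\n",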
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes as dtypes_lib\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\n\n\nclass MatrixDiagTest(test.TestCase):\n\n def testVector(self):\n with self.session(use_gpu=True):\n v = np.array([1.0, 2.0, 3.0])\n mat = np.diag(v)\n v_diag = array_ops.matrix_diag(v)\n self.assertEqual((3, 3), v_diag.get_shape())\n self.assertAllEqual(v_diag.eval(), mat)\n\n def _testBatchVector(self, dtype):\n with self.cached_session(use_gpu=True):\n v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)\n mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],\n [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],\n [0.0, 0.0, 6.0]]]).astype(dtype)\n v_batch_diag = array_ops.matrix_diag(v_batch)\n self.assertEqual((2, 3, 3), v_batch_diag.get_shape())\n self.assertAllEqual(v_batch_diag.eval(), mat_batch)\n\n def testBatchVector(self):\n self._testBatchVector(np.float32)\n self._testBatchVector(np.float64)\n self._testBatchVector(np.int32)\n self._testBatchVector(np.int64)\n self._testBatchVector(np.bool)\n\n def testInvalidShape(self):\n with self.assertRaisesRegexp(ValueError, \"must be at least rank 1\"):\n array_ops.matrix_diag(0)\n\n def testInvalidShapeAtEval(self):\n with self.session(use_gpu=True):\n v = array_ops.placeholder(dtype=dtypes_lib.float32)\n with self.assertRaisesOpError(\"input must be at least 1-dim\"):\n array_ops.matrix_diag(v).eval(feed_dict={v: 0.0})\n\n def testGrad(self):\n shapes = ((3,), (7, 4))\n with self.session(use_gpu=True):\n for shape in shapes:\n x = constant_op.constant(np.random.rand(*shape), np.float32)\n y = array_ops.matrix_diag(x)\n error = gradient_checker.compute_gradient_error(x,\n x.get_shape().as_list(),\n y,\n y.get_shape().as_list())\n self.assertLess(error, 1e-4)\n\n\nclass MatrixSetDiagTest(test.TestCase):\n\n def testSquare(self):\n with self.session(use_gpu=True):\n v = np.array([1.0, 2.0, 3.0])\n mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]])\n mat_set_diag = np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0],\n [1.0, 1.0, 3.0]])\n output = array_ops.matrix_set_diag(mat, v)\n self.assertEqual((3, 3), output.get_shape())\n self.assertAllEqual(mat_set_diag, self.evaluate(output))\n\n def testRectangular(self):\n with self.session(use_gpu=True):\n v = np.array([3.0, 4.0])\n mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])\n expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])\n 
output = array_ops.matrix_set_diag(mat, v)\n self.assertEqual((2, 3), output.get_shape())\n self.assertAllEqual(expected, self.evaluate(output))\n\n v = np.array([3.0, 4.0])\n mat = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])\n expected = np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]])\n output = array_ops.matrix_set_diag(mat, v)\n self.assertEqual((3, 2), output.get_shape())\n self.assertAllEqual(expected, self.evaluate(output))\n\n def _testSquareBatch(self, dtype):\n with self.cached_session(use_gpu=True):\n v_batch = np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]]).astype(dtype)\n mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]],\n [[4.0, 0.0, 4.0], [0.0, 5.0, 0.0],\n [2.0, 0.0, 6.0]]]).astype(dtype)\n\n mat_set_diag_batch = np.array([[[-1.0, 0.0, 3.0], [0.0, 0.0, 0.0],\n [1.0, 0.0, -3.0]],\n [[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0],\n [2.0, 0.0, -6.0]]]).astype(dtype)\n\n output = array_ops.matrix_set_diag(mat_batch, v_batch)\n self.assertEqual((2, 3, 3), output.get_shape())\n self.assertAllEqual(mat_set_diag_batch, self.evaluate(output))\n\n def testSquareBatch(self):\n self._testSquareBatch(np.float32)\n self._testSquareBatch(np.float64)\n self._testSquareBatch(np.int32)\n self._testSquareBatch(np.int64)\n self._testSquareBatch(np.bool)\n\n def testRectangularBatch(self):\n with self.session(use_gpu=True):\n v_batch = np.array([[-1.0, -2.0], [-4.0, -5.0]])\n mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],\n [[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]])\n\n mat_set_diag_batch = np.array([[[-1.0, 0.0, 3.0], [0.0, -2.0, 0.0]],\n [[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0]]])\n output = array_ops.matrix_set_diag(mat_batch, v_batch)\n self.assertEqual((2, 2, 3), output.get_shape())\n self.assertAllEqual(mat_set_diag_batch, self.evaluate(output))\n\n def testInvalidShape(self):\n with self.assertRaisesRegexp(ValueError, \"must be at least rank 2\"):\n array_ops.matrix_set_diag(0, [0])\n with self.assertRaisesRegexp(ValueError, \"must be at least rank 1\"):\n array_ops.matrix_set_diag([[0]], 0)\n\n def testInvalidShapeAtEval(self):\n with self.session(use_gpu=True):\n v = array_ops.placeholder(dtype=dtypes_lib.float32)\n with self.assertRaisesOpError(\"input must be at least 2-dim\"):\n array_ops.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})\n with self.assertRaisesOpError(\n r\"but received input shape: \\[1,1\\] and diagonal shape: \\[\\]\"):\n array_ops.matrix_set_diag([[v]], v).eval(feed_dict={v: 0.0})\n\n def testGrad(self):\n shapes = ((3, 4, 4), (3, 3, 4), (3, 4, 3), (7, 4, 8, 8))\n with self.session(use_gpu=True):\n for shape in shapes:\n x = constant_op.constant(\n np.random.rand(*shape), dtype=dtypes_lib.float32)\n diag_shape = shape[:-2] + (min(shape[-2:]),)\n x_diag = constant_op.constant(\n np.random.rand(*diag_shape), dtype=dtypes_lib.float32)\n y = array_ops.matrix_set_diag(x, x_diag)\n error_x = gradient_checker.compute_gradient_error(\n x,\n x.get_shape().as_list(), y,\n y.get_shape().as_list())\n self.assertLess(error_x, 1e-4)\n error_x_diag = gradient_checker.compute_gradient_error(\n x_diag,\n x_diag.get_shape().as_list(), y,\n y.get_shape().as_list())\n self.assertLess(error_x_diag, 1e-4)\n\n def testGradWithNoShapeInformation(self):\n with self.session(use_gpu=True) as sess:\n v = array_ops.placeholder(dtype=dtypes_lib.float32)\n mat = array_ops.placeholder(dtype=dtypes_lib.float32)\n grad_input = array_ops.placeholder(dtype=dtypes_lib.float32)\n output = array_ops.matrix_set_diag(mat, v)\n grads = gradients_impl.gradients(output, [mat, v], 
grad_ys=grad_input)\n grad_input_val = np.random.rand(3, 3).astype(np.float32)\n grad_vals = sess.run(\n grads,\n feed_dict={\n v: 2 * np.ones(3),\n mat: np.ones((3, 3)),\n grad_input: grad_input_val\n })\n self.assertAllEqual(np.diag(grad_input_val), grad_vals[1])\n self.assertAllEqual(grad_input_val - np.diag(np.diag(grad_input_val)),\n grad_vals[0])\n\n\nclass MatrixDiagPartTest(test.TestCase):\n\n def testSquare(self):\n with self.session(use_gpu=True):\n v = np.array([1.0, 2.0, 3.0])\n mat = np.diag(v)\n mat_diag = array_ops.matrix_diag_part(mat)\n self.assertEqual((3,), mat_diag.get_shape())\n self.assertAllEqual(mat_diag.eval(), v)\n\n def testRectangular(self):\n with self.session(use_gpu=True):\n mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n mat_diag = array_ops.matrix_diag_part(mat)\n self.assertAllEqual(mat_diag.eval(), np.array([1.0, 5.0]))\n mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])\n mat_diag = array_ops.matrix_diag_part(mat)\n self.assertAllEqual(mat_diag.eval(), np.array([1.0, 4.0]))\n\n def _testSquareBatch(self, dtype):\n with self.cached_session(use_gpu=True):\n v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)\n mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],\n [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],\n [0.0, 0.0, 6.0]]]).astype(dtype)\n self.assertEqual(mat_batch.shape, (2, 3, 3))\n mat_batch_diag = array_ops.matrix_diag_part(mat_batch)\n self.assertEqual((2, 3), mat_batch_diag.get_shape())\n self.assertAllEqual(mat_batch_diag.eval(), v_batch)\n\n def testSquareBatch(self):\n self._testSquareBatch(np.float32)\n self._testSquareBatch(np.float64)\n self._testSquareBatch(np.int32)\n self._testSquareBatch(np.int64)\n self._testSquareBatch(np.bool)\n\n def testRectangularBatch(self):\n with self.session(use_gpu=True):\n v_batch = np.array([[1.0, 2.0], [4.0, 5.0]])\n mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]],\n [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0]]])\n self.assertEqual(mat_batch.shape, (2, 2, 3))\n mat_batch_diag = array_ops.matrix_diag_part(mat_batch)\n self.assertEqual((2, 2), mat_batch_diag.get_shape())\n self.assertAllEqual(mat_batch_diag.eval(), v_batch)\n\n def testInvalidShape(self):\n with self.assertRaisesRegexp(ValueError, \"must be at least rank 2\"):\n array_ops.matrix_diag_part(0)\n\n def testInvalidShapeAtEval(self):\n with self.session(use_gpu=True):\n v = array_ops.placeholder(dtype=dtypes_lib.float32)\n with self.assertRaisesOpError(\"input must be at least 2-dim\"):\n array_ops.matrix_diag_part(v).eval(feed_dict={v: 0.0})\n\n def testGrad(self):\n shapes = ((3, 3), (2, 3), (3, 2), (5, 3, 3))\n with self.session(use_gpu=True):\n for shape in shapes:\n x = constant_op.constant(np.random.rand(*shape), dtype=np.float32)\n y = array_ops.matrix_diag_part(x)\n error = gradient_checker.compute_gradient_error(x,\n x.get_shape().as_list(),\n y,\n y.get_shape().as_list())\n self.assertLess(error, 1e-4)\n\n\nclass DiagTest(test.TestCase):\n\n def _diagOp(self, diag, dtype, expected_ans, use_gpu):\n with self.cached_session(use_gpu=use_gpu):\n tf_ans = array_ops.diag(ops.convert_to_tensor(diag.astype(dtype)))\n out = self.evaluate(tf_ans)\n tf_ans_inv = array_ops.diag_part(expected_ans)\n inv_out = self.evaluate(tf_ans_inv)\n self.assertAllClose(out, expected_ans)\n self.assertAllClose(inv_out, diag)\n self.assertShapeEqual(expected_ans, tf_ans)\n self.assertShapeEqual(diag, tf_ans_inv)\n\n def diagOp(self, diag, dtype, expected_ans):\n self._diagOp(diag, dtype, expected_ans, False)\n 
self._diagOp(diag, dtype, expected_ans, True)\n\n def testEmptyTensor(self):\n x = np.array([])\n expected_ans = np.empty([0, 0])\n self.diagOp(x, np.int32, expected_ans)\n\n def testRankOneIntTensor(self):\n x = np.array([1, 2, 3])\n expected_ans = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])\n self.diagOp(x, np.int32, expected_ans)\n self.diagOp(x, np.int64, expected_ans)\n\n def testRankOneFloatTensor(self):\n x = np.array([1.1, 2.2, 3.3])\n expected_ans = np.array([[1.1, 0, 0], [0, 2.2, 0], [0, 0, 3.3]])\n self.diagOp(x, np.float32, expected_ans)\n self.diagOp(x, np.float64, expected_ans)\n\n def testRankOneComplexTensor(self):\n for dtype in [np.complex64, np.complex128]:\n x = np.array([1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j], dtype=dtype)\n expected_ans = np.array(\n [[1.1 + 1.1j, 0 + 0j, 0 + 0j], [0 + 0j, 2.2 + 2.2j, 0 + 0j],\n [0 + 0j, 0 + 0j, 3.3 + 3.3j]],\n dtype=dtype)\n self.diagOp(x, dtype, expected_ans)\n\n def testRankTwoIntTensor(self):\n x = np.array([[1, 2, 3], [4, 5, 6]])\n expected_ans = np.array([[[[1, 0, 0], [0, 0, 0]], [[0, 2, 0], [0, 0, 0]],\n [[0, 0, 3], [0, 0, 0]]],\n [[[0, 0, 0], [4, 0, 0]], [[0, 0, 0], [0, 5, 0]],\n [[0, 0, 0], [0, 0, 6]]]])\n self.diagOp(x, np.int32, expected_ans)\n self.diagOp(x, np.int64, expected_ans)\n\n def testRankTwoFloatTensor(self):\n x = np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]])\n expected_ans = np.array(\n [[[[1.1, 0, 0], [0, 0, 0]], [[0, 2.2, 0], [0, 0, 0]],\n [[0, 0, 3.3], [0, 0, 0]]], [[[0, 0, 0], [4.4, 0, 0]],\n [[0, 0, 0], [0, 5.5, 0]], [[0, 0, 0],\n [0, 0, 6.6]]]])\n self.diagOp(x, np.float32, expected_ans)\n self.diagOp(x, np.float64, expected_ans)\n\n def testRankTwoComplexTensor(self):\n for dtype in [np.complex64, np.complex128]:\n x = np.array(\n [[1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j],\n [4.4 + 4.4j, 5.5 + 5.5j, 6.6 + 6.6j]],\n dtype=dtype)\n expected_ans = np.array(\n [[[[1.1 + 1.1j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]], [\n [0 + 0j, 2.2 + 2.2j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]\n ], [[0 + 0j, 0 + 0j, 3.3 + 3.3j], [0 + 0j, 0 + 0j, 0 + 0j]]], [[\n [0 + 0j, 0 + 0j, 0 + 0j], [4.4 + 4.4j, 0 + 0j, 0 + 0j]\n ], [[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 5.5 + 5.5j, 0 + 0j]\n ], [[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 6.6 + 6.6j]]]],\n dtype=dtype)\n self.diagOp(x, dtype, expected_ans)\n\n def testRankThreeFloatTensor(self):\n x = np.array([[[1.1, 2.2], [3.3, 4.4]], [[5.5, 6.6], [7.7, 8.8]]])\n expected_ans = np.array([[[[[[1.1, 0], [0, 0]], [[0, 0], [0, 0]]],\n [[[0, 2.2], [0, 0]], [[0, 0], [0, 0]]]],\n [[[[0, 0], [3.3, 0]], [[0, 0], [0, 0]]],\n [[[0, 0], [0, 4.4]], [[0, 0], [0, 0]]]]],\n [[[[[0, 0], [0, 0]], [[5.5, 0], [0, 0]]],\n [[[0, 0], [0, 0]], [[0, 6.6], [0, 0]]]],\n [[[[0, 0], [0, 0]], [[0, 0], [7.7, 0]]],\n [[[0, 0], [0, 0]], [[0, 0], [0, 8.8]]]]]])\n self.diagOp(x, np.float32, expected_ans)\n self.diagOp(x, np.float64, expected_ans)\n\n def testRankThreeComplexTensor(self):\n for dtype in [np.complex64, np.complex128]:\n x = np.array(\n [[[1.1 + 1.1j, 2.2 + 2.2j], [3.3 + 3.3j, 4.4 + 4.4j]],\n [[5.5 + 5.5j, 6.6 + 6.6j], [7.7 + 7.7j, 8.8 + 8.8j]]],\n dtype=dtype)\n expected_ans = np.array(\n [[[[[[1.1 + 1.1j, 0 + 0j], [0 + 0j, 0 + 0j]], [[0 + 0j, 0 + 0j], [\n 0 + 0j, 0 + 0j\n ]]], [[[0 + 0j, 2.2 + 2.2j], [0 + 0j, 0 + 0j]], [[0 + 0j, 0 + 0j], [\n 0 + 0j, 0 + 0j\n ]]]], [[[[0 + 0j, 0 + 0j], [3.3 + 3.3j, 0 + 0j]], [[0 + 0j, 0 + 0j], [\n 0 + 0j, 0 + 0j\n ]]], [[[0 + 0j, 0 + 0j], [0 + 0j, 4.4 + 4.4j]], [[0 + 0j, 0 + 0j], [\n 0 + 0j, 0 + 0j\n ]]]]], [[[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], [\n [5.5 + 5.5j, 0 + 0j], [0 + 0j, 
0 + 0j]\n ]], [[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], [[0 + 0j, 6.6 + 6.6j], [\n 0 + 0j, 0 + 0j\n ]]]], [[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], [[0 + 0j, 0 + 0j], [\n 7.7 + 7.7j, 0 + 0j\n ]]], [[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],\n [[0 + 0j, 0 + 0j], [0 + 0j, 8.8 + 8.8j]]]]]],\n dtype=dtype)\n self.diagOp(x, dtype, expected_ans)\n\n def testRankFourNumberTensor(self):\n for dtype in [np.float32, np.float64, np.int64, np.int32]:\n # Input with shape [2, 1, 2, 3]\n x = np.array(\n [[[[1, 2, 3], [4, 5, 6]]], [[[7, 8, 9], [10, 11, 12]]]], dtype=dtype)\n # Output with shape [2, 1, 2, 3, 2, 1, 2, 3]\n expected_ans = np.array(\n [[[[[[[[1, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]], [\n [[[0, 2, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]\n ], [[[[0, 0, 3], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]]], [[\n [[[0, 0, 0], [4, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]\n ], [[[[0, 0, 0], [0, 5, 0]]], [[[0, 0, 0], [0, 0, 0]]]], [\n [[[0, 0, 0], [0, 0, 6]]], [[[0, 0, 0], [0, 0, 0]]]\n ]]]], [[[[[[[0, 0, 0], [0, 0, 0]]], [[[7, 0, 0], [0, 0, 0]]]], [\n [[[0, 0, 0], [0, 0, 0]]], [[[0, 8, 0], [0, 0, 0]]]\n ], [[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 9], [0, 0, 0]]]]], [[\n [[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [10, 0, 0]]]\n ], [[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 11, 0]]]\n ], [[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 12]]]]]]]],\n dtype=dtype)\n self.diagOp(x, dtype, expected_ans)\n\n def testInvalidRank(self):\n with self.assertRaisesRegexp(ValueError, \"must be at least rank 1\"):\n array_ops.diag(0.0)\n\n\nclass DiagPartOpTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(0)\n\n def _diagPartOp(self, tensor, dtype, expected_ans, use_gpu):\n with self.cached_session(use_gpu=use_gpu):\n tensor = ops.convert_to_tensor(tensor.astype(dtype))\n tf_ans_inv = array_ops.diag_part(tensor)\n inv_out = self.evaluate(tf_ans_inv)\n self.assertAllClose(inv_out, expected_ans)\n self.assertShapeEqual(expected_ans, tf_ans_inv)\n\n def diagPartOp(self, tensor, dtype, expected_ans):\n self._diagPartOp(tensor, dtype, expected_ans, False)\n self._diagPartOp(tensor, dtype, expected_ans, True)\n\n def testRankTwoFloatTensor(self):\n x = np.random.rand(3, 3)\n i = np.arange(3)\n expected_ans = x[i, i]\n self.diagPartOp(x, np.float32, expected_ans)\n self.diagPartOp(x, np.float64, expected_ans)\n\n def testRankFourFloatTensorUnknownShape(self):\n x = np.random.rand(3, 3)\n i = np.arange(3)\n expected_ans = x[i, i]\n for shape in None, (None, 3), (3, None):\n with self.cached_session(use_gpu=False):\n t = ops.convert_to_tensor(x.astype(np.float32))\n t.set_shape(shape)\n tf_ans = array_ops.diag_part(t)\n out = self.evaluate(tf_ans)\n self.assertAllClose(out, expected_ans)\n self.assertShapeEqual(expected_ans, tf_ans)\n\n def testRankFourFloatTensor(self):\n x = np.random.rand(2, 3, 2, 3)\n i = np.arange(2)[:, None]\n j = np.arange(3)\n expected_ans = x[i, j, i, j]\n self.diagPartOp(x, np.float32, expected_ans)\n self.diagPartOp(x, np.float64, expected_ans)\n\n def testRankSixFloatTensor(self):\n x = np.random.rand(2, 2, 2, 2, 2, 2)\n i = np.arange(2)[:, None, None]\n j = np.arange(2)[:, None]\n k = np.arange(2)\n expected_ans = x[i, j, k, i, j, k]\n self.diagPartOp(x, np.float32, expected_ans)\n self.diagPartOp(x, np.float64, expected_ans)\n\n def testRankEightComplexTensor(self):\n x = np.random.rand(2, 2, 2, 3, 2, 2, 2, 3)\n i = np.arange(2)[:, None, None, None]\n j = np.arange(2)[:, None, None]\n k = np.arange(2)[:, None]\n l = np.arange(3)\n expected_ans = x[i, j, k, l, i, j, k, l]\n self.diagPartOp(x, np.complex64, 
expected_ans)\n self.diagPartOp(x, np.complex128, expected_ans)\n\n def testOddRank(self):\n w = np.random.rand(2)\n x = np.random.rand(2, 2, 2)\n self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)\n self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)\n with self.assertRaises(ValueError):\n array_ops.diag_part(0.0)\n\n def testUnevenDimensions(self):\n w = np.random.rand(2, 5)\n x = np.random.rand(2, 1, 2, 3)\n self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)\n self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)\n\n\nclass DiagGradOpTest(test.TestCase):\n\n def testDiagGrad(self):\n np.random.seed(0)\n shapes = ((3,), (3, 3), (3, 3, 3))\n dtypes = (dtypes_lib.float32, dtypes_lib.float64)\n with self.session(use_gpu=False):\n errors = []\n for shape in shapes:\n for dtype in dtypes:\n x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)\n y = array_ops.diag(x1)\n error = gradient_checker.compute_gradient_error(\n x1,\n x1.get_shape().as_list(), y,\n y.get_shape().as_list())\n tf_logging.info(\"error = %f\", error)\n self.assertLess(error, 1e-4)\n\n\nclass DiagGradPartOpTest(test.TestCase):\n\n def testDiagPartGrad(self):\n np.random.seed(0)\n shapes = ((3, 3), (3, 3, 3, 3))\n dtypes = (dtypes_lib.float32, dtypes_lib.float64)\n with self.session(use_gpu=False):\n errors = []\n for shape in shapes:\n for dtype in dtypes:\n x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)\n y = array_ops.diag_part(x1)\n error = gradient_checker.compute_gradient_error(\n x1,\n x1.get_shape().as_list(), y,\n y.get_shape().as_list())\n tf_logging.info(\"error = %f\", error)\n self.assertLess(error, 1e-4)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
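"# --- Illustrative shape comparison (added note, not original repo code).\n# The tests above exercise ops with different batch semantics: matrix_diag\n# places only the innermost axis on the diagonal of a new matrix, while diag\n# pairs *all* axes and returns a rank-2k tensor. Assumes a TF 1.x build where\n# the ops are exposed as tf.matrix_diag / tf.diag / tf.matrix_diag_part.\nimport numpy as np\nimport tensorflow as tf\n\nv = np.arange(6, dtype=np.float32).reshape(2, 3)\nprint(tf.matrix_diag(v).shape)  # (2, 3, 3): a batch of two 3x3 matrices\nprint(tf.diag(v).shape)         # (2, 3, 2, 3): rank 4, as in DiagTest above\nprint(tf.matrix_diag_part(np.eye(3, dtype=np.float32)).shape)  # (3,)\n",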
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the experimental input pipeline ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nimport numpy as np\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.platform import test\n\n\nclass DatasetConstructorTest(test_base.DatasetTestBase):\n\n def testFromTensors(self):\n \"\"\"Test a dataset that represents a single tuple of tensors.\"\"\"\n components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))\n\n iterator = (dataset_ops.Dataset.from_tensors(components)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual([c.shape for c in components],\n [t.shape for t in get_next])\n\n with self.cached_session() as sess:\n self.evaluate(init_op)\n results = self.evaluate(get_next)\n for component, result_component in zip(components, results):\n self.assertAllEqual(component, result_component)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromTensorsSparse(self):\n \"\"\"Test a dataset that represents a single tuple of tensors.\"\"\"\n components = (sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0], [1, 1]]),\n values=np.array([-1, 1]),\n dense_shape=np.array([2, 2])))\n\n iterator = (\n dataset_ops.Dataset.from_tensors(components)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual(\n [tensor_shape.TensorShape(c.dense_shape) for c in components],\n [shape for shape in iterator.output_shapes])\n\n with self.cached_session() as sess:\n self.evaluate(init_op)\n results = self.evaluate(get_next)\n for component, result_component in zip(components, results):\n self.assertSparseValuesEqual(component, result_component)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromTensorsMixed(self):\n \"\"\"Test an dataset that represents a single tuple of tensors.\"\"\"\n components = (np.array(1), np.array([1, 2, 3]), np.array(37.0),\n sparse_tensor.SparseTensorValue(\n 
indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0], [1, 1]]),\n values=np.array([-1, 1]),\n dense_shape=np.array([2, 2])))\n\n iterator = (\n dataset_ops.Dataset.from_tensors(components)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual([\n tensor_shape.TensorShape(c.dense_shape)\n if sparse_tensor.is_sparse(c) else c.shape for c in components\n ], [shape for shape in iterator.output_shapes])\n\n with self.cached_session() as sess:\n self.evaluate(init_op)\n results = self.evaluate(get_next)\n for component, result_component in zip(components, results):\n if sparse_tensor.is_sparse(component):\n self.assertSparseValuesEqual(component, result_component)\n else:\n self.assertAllEqual(component, result_component)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromTensorSlices(self):\n \"\"\"Test a dataset that represents the slices from a tuple of tensors.\"\"\"\n components = (\n np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(\n np.array([[12], [13], [14], [15]]), 22),\n np.array([37.0, 38.0, 39.0, 40.0])\n )\n\n iterator = (dataset_ops.Dataset.from_tensor_slices(components)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual([c.shape[1:] for c in components],\n [t.shape for t in get_next])\n\n with self.cached_session() as sess:\n self.evaluate(init_op)\n for i in range(4):\n results = self.evaluate(get_next)\n for component, result_component in zip(components, results):\n self.assertAllEqual(component[i], result_component)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromTensorSlicesSparse(self):\n \"\"\"Test a dataset that represents the slices from a tuple of tensors.\"\"\"\n components = (sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0], [1, 0], [2, 0]]),\n values=np.array([0, 0, 0]),\n dense_shape=np.array([3, 1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0], [1, 1], [2, 2]]),\n values=np.array([1, 2, 3]),\n dense_shape=np.array([3, 3])))\n\n iterator = (\n dataset_ops.Dataset.from_tensor_slices(components)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual(\n [tensor_shape.TensorShape(c.dense_shape[1:]) for c in components],\n [shape for shape in iterator.output_shapes])\n\n with self.cached_session() as sess:\n self.evaluate(init_op)\n expected = [\n (sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([1]),\n dense_shape=np.array([3]))),\n (sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[1]]),\n values=np.array([2]),\n dense_shape=np.array([3]))),\n (sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[2]]),\n values=np.array([3]),\n dense_shape=np.array([3]))),\n ]\n for i in range(3):\n results = self.evaluate(get_next)\n for component, result_component in zip(expected[i], results):\n self.assertSparseValuesEqual(component, result_component)\n with self.assertRaises(errors.OutOfRangeError):\n 
sess.run(get_next)\n\n def testFromTensorSlicesMixed(self):\n \"\"\"Test a dataset that represents the slices from a tuple of tensors.\"\"\"\n components = (np.tile(np.array([[1], [2], [3]]), 20),\n np.tile(np.array([[12], [13], [14]]), 22),\n np.array([37.0, 38.0, 39.0]),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0], [1, 0], [2, 0]]),\n values=np.array([0, 0, 0]),\n dense_shape=np.array([3, 1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0], [1, 1], [2, 2]]),\n values=np.array([1, 2, 3]),\n dense_shape=np.array([3, 3])))\n\n iterator = (\n dataset_ops.Dataset.from_tensor_slices(components)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual([\n tensor_shape.TensorShape(c.dense_shape[1:])\n if sparse_tensor.is_sparse(c) else c.shape[1:] for c in components\n ], [shape for shape in iterator.output_shapes])\n\n with self.cached_session() as sess:\n self.evaluate(init_op)\n expected = [\n (sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([1]),\n dense_shape=np.array([3]))),\n (sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[1]]),\n values=np.array([2]),\n dense_shape=np.array([3]))),\n (sparse_tensor.SparseTensorValue(\n indices=np.array([[0]]),\n values=np.array([0]),\n dense_shape=np.array([1])),\n sparse_tensor.SparseTensorValue(\n indices=np.array([[2]]),\n values=np.array([3]),\n dense_shape=np.array([3]))),\n ]\n for i in range(3):\n results = self.evaluate(get_next)\n for component, result_component in zip(\n (list(zip(*components[:3]))[i] + expected[i]), results):\n if sparse_tensor.is_sparse(component):\n self.assertSparseValuesEqual(component, result_component)\n else:\n self.assertAllEqual(component, result_component)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromTensorSlicesWithDict(self):\n components = {\"foo\": [1, 2, 3], \"bar\": [[4.0], [5.0], [6.0]]}\n iterator = (dataset_ops.Dataset.from_tensor_slices(components)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual(dtypes.int32, iterator.output_types[\"foo\"])\n self.assertEqual(dtypes.float32, iterator.output_types[\"bar\"])\n self.assertEqual((), iterator.output_shapes[\"foo\"])\n self.assertEqual((1,), iterator.output_shapes[\"bar\"])\n\n with self.cached_session() as sess:\n self.evaluate(init_op)\n for i in range(3):\n results = self.evaluate(get_next)\n self.assertEqual(components[\"foo\"][i], results[\"foo\"])\n self.assertEqual(components[\"bar\"][i], results[\"bar\"])\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromSparseTensorSlices(self):\n \"\"\"Test a dataset based on slices of a `tf.SparseTensor`.\"\"\"\n st = array_ops.sparse_placeholder(dtypes.float64)\n iterator = (dataset_ops.Dataset.from_sparse_tensor_slices(st)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = sparse_tensor.SparseTensor(*iterator.get_next())\n\n with self.cached_session() as sess:\n slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]\n\n # Test with sparse tensor in the appropriate order.\n indices = np.array(\n [[i, j] for i in range(len(slices)) for j in 
range(len(slices[i]))])\n values = np.array([val for s in slices for val in s])\n dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])\n sparse_feed = sparse_tensor.SparseTensorValue(indices, values,\n dense_shape)\n sess.run(init_op, feed_dict={st: sparse_feed})\n for i, s in enumerate(slices):\n results = self.evaluate(get_next)\n self.assertAllEqual(s, results.values)\n expected_indices = np.array(\n [[j] for j in range(len(slices[i]))]).reshape([-1, 1])\n self.assertAllEqual(expected_indices, results.indices)\n self.assertAllEqual(dense_shape[1:], results.dense_shape)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # Test with sparse tensor in the reverse order, which is not\n # currently supported.\n reverse_order_indices = indices[::-1, :]\n reverse_order_values = values[::-1]\n sparse_feed = sparse_tensor.SparseTensorValue(\n reverse_order_indices, reverse_order_values, dense_shape)\n with self.assertRaises(errors.UnimplementedError):\n sess.run(init_op, feed_dict={st: sparse_feed})\n\n # Test with an empty sparse tensor.\n empty_indices = np.empty((0, 4), dtype=np.int64)\n empty_values = np.empty((0,), dtype=np.float64)\n empty_dense_shape = [0, 4, 37, 9]\n sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,\n empty_dense_shape)\n sess.run(init_op, feed_dict={st: sparse_feed})\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # pylint: disable=g-long-lambda,unnecessary-lambda\n def testNestedStructure(self):\n components = (np.array([1, 2, 3], dtype=np.int64),\n (np.array([4., 5.]), np.array([6., 7.])),\n np.array([8, 9, 10], dtype=np.int64))\n\n dataset = dataset_ops.Dataset.from_tensors(components)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.shuffle(10, 10)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.repeat(-1)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.filter(lambda x, y, z: True)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.take(5)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))\n self.assertEquals(((dtypes.int64, dtypes.int64),\n (dtypes.float64, dtypes.float64)), dataset.output_types)\n self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)\n\n dataset = dataset.flat_map(\n lambda x, y: dataset_ops.Dataset.from_tensors(((x[0], x[1]),\n (y[0], y[1])))\n )\n self.assertEquals(((dtypes.int64, dtypes.int64),\n (dtypes.float64, dtypes.float64)), dataset.output_types)\n self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)\n\n dataset = dataset.batch(32)\n self.assertEquals(((dtypes.int64, dtypes.int64),\n (dtypes.float64, dtypes.float64)), dataset.output_types)\n self.assertEquals((([None, 3], [None, 3]), ([None, 2], [None, 2])),\n nest.pack_sequence_as(dataset.output_shapes, 
[\n s.as_list()\n for s in nest.flatten(dataset.output_shapes)\n ]))\n\n iterator = dataset.make_one_shot_iterator()\n (w, x), (y, z) = iterator.get_next()\n self.assertEquals(dtypes.int64, w.dtype)\n self.assertEquals(dtypes.int64, x.dtype)\n self.assertEquals(dtypes.float64, y.dtype)\n self.assertEquals(dtypes.float64, z.dtype)\n self.assertEquals([None, 3], w.shape.as_list())\n self.assertEquals([None, 3], x.shape.as_list())\n self.assertEquals([None, 2], y.shape.as_list())\n self.assertEquals([None, 2], z.shape.as_list())\n\n iterator = dataset.make_initializable_iterator()\n (w, x), (y, z) = iterator.get_next()\n self.assertEquals(dtypes.int64, w.dtype)\n self.assertEquals(dtypes.int64, x.dtype)\n self.assertEquals(dtypes.float64, y.dtype)\n self.assertEquals(dtypes.float64, z.dtype)\n self.assertEquals([None, 3], w.shape.as_list())\n self.assertEquals([None, 3], x.shape.as_list())\n self.assertEquals([None, 2], y.shape.as_list())\n self.assertEquals([None, 2], z.shape.as_list())\n\n # Define a separate set of components with matching leading\n # dimension for the from-slices constructor.\n components_for_slices = (np.array([1, 2, 3], dtype=np.int64),\n (np.array([4., 5., 6.]),\n np.array([7., 8., 9.])),\n np.array([10, 11, 12], dtype=np.int64))\n\n dataset = dataset_ops.Dataset.from_tensor_slices(components_for_slices)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([], ([], []), []), dataset.output_shapes)\n\n def testNestedDict(self):\n components = {\"a\": {\"aa\": 1, \"ab\": [2.0, 2.0]}, \"b\": [3, 3, 3]}\n dataset = dataset_ops.Dataset.from_tensors(components)\n self.assertEquals(dtypes.int32, dataset.output_types[\"a\"][\"aa\"])\n self.assertEquals(dtypes.float32, dataset.output_types[\"a\"][\"ab\"])\n self.assertEquals(dtypes.int32, dataset.output_types[\"b\"])\n self.assertEquals([], dataset.output_shapes[\"a\"][\"aa\"])\n self.assertEquals([2], dataset.output_shapes[\"a\"][\"ab\"])\n self.assertEquals([3], dataset.output_shapes[\"b\"])\n\n def testNonSequenceNestedStructure(self):\n components = np.array([1, 2, 3], dtype=np.int64)\n\n dataset = dataset_ops.Dataset.from_tensors(components)\n self.assertEquals(dtypes.int64, dataset.output_types)\n self.assertEquals([3], dataset.output_shapes)\n\n dataset = dataset.filter(\n lambda x: math_ops.reduce_all(math_ops.equal(x, components)))\n self.assertEquals(dtypes.int64, dataset.output_types)\n self.assertEquals([3], dataset.output_shapes)\n\n dataset = dataset.map(lambda x: array_ops.stack([x, x]))\n self.assertEquals(dtypes.int64, dataset.output_types)\n self.assertEquals([2, 3], dataset.output_shapes)\n\n dataset = dataset.flat_map(\n lambda x: dataset_ops.Dataset.from_tensor_slices(x))\n self.assertEquals(dtypes.int64, dataset.output_types)\n self.assertEquals([3], dataset.output_shapes)\n\n iterator = dataset.make_one_shot_iterator()\n get_next = iterator.get_next()\n self.assertEquals(dtypes.int64, get_next.dtype)\n self.assertEquals([3], get_next.shape)\n\n def testSplitPipelineFailsWithPlacementError(self):\n with session.Session(\n target=\"\",\n config=config_pb2.ConfigProto(device_count={\"CPU\": 2})) as sess:\n\n dataset = dataset_ops.Dataset.from_tensors(0)\n\n # Define a pipeline that attempts to use variables on two\n # different devices.\n #\n # Initialize the variables before creating to iterator, to avoid the\n # placement algorithm overriding the DT_RESOURCE colocation constraints.\n with ops.device(\"/cpu:0\"):\n var_0 = 
resource_variable_ops.ResourceVariable(initial_value=0)\n dataset = dataset.map(lambda x: x + var_0.read_value())\n self.evaluate(var_0.initializer)\n\n with ops.device(\"/cpu:1\"):\n var_1 = resource_variable_ops.ResourceVariable(initial_value=0)\n dataset = dataset.map(lambda x: x + var_1.read_value())\n self.evaluate(var_1.initializer)\n\n iterator = dataset.make_initializable_iterator()\n self.evaluate(iterator.initializer)\n\n with self.assertRaisesRegexp(\n errors.FailedPreconditionError,\n \"Error while reading resource variable Variable\"):\n sess.run(iterator.get_next())\n\n\nclass DatasetConstructorBenchmark(test.Benchmark):\n\n def benchmarkSliceRepeatBatch(self):\n input_size = 10000\n batch_size = 100\n num_epochs = 100\n\n input_data = np.random.randn(input_size)\n\n dataset = (\n dataset_ops.Dataset.from_tensor_slices(input_data)\n .repeat(num_epochs + 1).batch(batch_size))\n iterator = dataset.make_initializable_iterator()\n next_element = iterator.get_next()\n\n with session.Session() as sess:\n self.evaluate(iterator.initializer)\n # Run one whole epoch to burn in the computation.\n for _ in range(input_size // batch_size):\n sess.run(next_element)\n deltas = []\n try:\n while True:\n start = time.time()\n sess.run(next_element)\n deltas.append(time.time() - start)\n except errors.OutOfRangeError:\n pass\n\n median_wall_time = np.median(deltas)\n print(\"Slice/repeat/batch with sess.run() input size: %d batch size: %d \"\n \"Median wall time per element: %f\" % (input_size, batch_size,\n median_wall_time))\n self.report_benchmark(\n iters=len(deltas),\n wall_time=median_wall_time,\n name=\"benchmark_slice_repeat_batch_input_%d_batch_%d\" % (input_size,\n batch_size))\n\n def benchmarkSliceRepeatBatchCallable(self):\n input_size = 10000\n batch_size = 100\n num_epochs = 100\n\n input_data = np.random.randn(input_size)\n\n dataset = (\n dataset_ops.Dataset.from_tensor_slices(input_data)\n .repeat(num_epochs + 1).batch(batch_size))\n iterator = dataset.make_initializable_iterator()\n next_element = iterator.get_next()\n\n with session.Session() as sess:\n self.evaluate(iterator.initializer)\n get_next_element = sess.make_callable(next_element)\n # Run one whole epoch to burn in the computation.\n for _ in range(input_size // batch_size):\n get_next_element()\n deltas = []\n try:\n while True:\n start = time.time()\n get_next_element()\n deltas.append(time.time() - start)\n except errors.OutOfRangeError:\n pass\n\n median_wall_time = np.median(deltas)\n print(\n \"Slice/repeat/batch with callable input size: %d batch size: %d Median\"\n \" wall time per element: %f\" % (input_size, batch_size,\n median_wall_time))\n self.report_benchmark(\n iters=len(deltas),\n wall_time=median_wall_time,\n name=\"benchmark_slice_repeat_batch_callable_input_%d_batch_%d\" %\n (input_size, batch_size))\n\n def benchmarkReshapeSliceRepeatCallable(self):\n input_size = 10000\n batch_size = 100\n num_epochs = 100\n\n input_data = np.random.randn(input_size)\n\n dataset = (\n dataset_ops.Dataset.from_tensor_slices(input_data.reshape(100, 100))\n .repeat(num_epochs + 1))\n iterator = dataset.make_initializable_iterator()\n next_element = iterator.get_next()\n\n with session.Session() as sess:\n self.evaluate(iterator.initializer)\n get_next_element = sess.make_callable(next_element)\n # Run one whole epoch to burn in the computation.\n for _ in range(input_size // batch_size):\n get_next_element()\n deltas = []\n try:\n while True:\n start = time.time()\n get_next_element()\n 
deltas.append(time.time() - start)\n except errors.OutOfRangeError:\n pass\n\n median_wall_time = np.median(deltas)\n print(\"Reshape/slice/repeat with callable input size: %d batch size: %d \"\n \"Median wall time per element: %f\" % (input_size, batch_size,\n median_wall_time))\n self.report_benchmark(\n iters=len(deltas),\n wall_time=median_wall_time,\n name=\"benchmark_reshape_slice_repeat_callable_input_%d_batch_%d\" %\n (input_size, batch_size))\n\n def benchmarkSliceBatchCacheRepeatCallable(self):\n input_size = 10000\n batch_size = 100\n num_epochs = 100\n\n input_data = np.random.randn(input_size)\n\n dataset = (\n dataset_ops.Dataset.from_tensor_slices(input_data).batch(batch_size)\n .cache().repeat(num_epochs + 1))\n iterator = dataset.make_initializable_iterator()\n next_element = iterator.get_next()\n\n with session.Session() as sess:\n self.evaluate(iterator.initializer)\n get_next_element = sess.make_callable(next_element)\n # Run one whole epoch to burn in the computation.\n for _ in range(input_size // batch_size):\n get_next_element()\n deltas = []\n try:\n while True:\n start = time.time()\n get_next_element()\n deltas.append(time.time() - start)\n except errors.OutOfRangeError:\n pass\n\n median_wall_time = np.median(deltas)\n print(\n \"Slice/batch/cache/repeat with callable input size: %d batch size: %d \"\n \"Median wall time per element: %f\"\n % (input_size, batch_size, median_wall_time))\n self.report_benchmark(\n iters=len(deltas),\n wall_time=median_wall_time,\n name=\"benchmark_slice_batch_cache_repeat_callable_input_%d_batch_%d\" %\n (input_size, batch_size))\n\n\nif __name__ == \"__main__\":\n test.main()\n",
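A minimal sketch (assuming the TensorFlow 1.x public API, which the tests above exercise through internal modules) of the distinction the constructor tests rely on: Dataset.from_tensors() wraps the whole structure as a single element, while Dataset.from_tensor_slices() emits one element per slice of the leading dimension.

import numpy as np
import tensorflow as tf

components = (np.array([[1], [2], [3]]), np.array([4.0, 5.0, 6.0]))

# One element whose shapes match the inputs: ((3, 1), (3,)).
whole = tf.data.Dataset.from_tensors(components)
# Three elements, each a slice along axis 0: ((1,), ()).
slices = tf.data.Dataset.from_tensor_slices(components)

get_next = slices.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    for _ in range(3):
        print(sess.run(get_next))  # e.g. (array([1]), 4.0)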
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.experimental.sleep()`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nfrom tensorflow.python.data.experimental.ops import sleep\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.platform import test\n\n_NUMPY_RANDOM_SEED = 42\n\n\nclass SleepTest(test_base.DatasetTestBase):\n\n def testSleep(self):\n sleep_microseconds = 100\n dataset = dataset_ops.Dataset.range(10).apply(\n sleep.sleep(sleep_microseconds))\n iterator = dataset.make_initializable_iterator()\n next_element = iterator.get_next()\n\n with self.cached_session() as sess:\n self.evaluate(iterator.initializer)\n start_time = time.time()\n for i in range(10):\n self.assertEqual(i, self.evaluate(next_element))\n end_time = time.time()\n self.assertGreater(end_time - start_time, (10 * sleep_microseconds) / 1e6)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for cross_device_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections as pycoll\nimport threading\n\nfrom tensorflow.python.distribute import all_reduce\nfrom tensorflow.python.distribute import values as value_lib\nfrom tensorflow.python.framework import device as pydev\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import collective_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nccl_ops\n\n\ndef aggregate_gradients_using_nccl(replica_grads):\n \"\"\"Aggregate gradients using nccl allreduce.\"\"\"\n agg_all_g_and_v = []\n for single_g_and_v in zip(*replica_grads):\n single_grads = [g for g, _ in single_g_and_v]\n agg_grads = nccl_ops.all_sum(single_grads)\n agg_all_g_and_v.append(\n [(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])\n\n agg_all_g_and_v = list(zip(*agg_all_g_and_v))\n\n return agg_all_g_and_v\n\n\ndef aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):\n \"\"\"Aggregate gradients using hierarchical copies.\n\n Args:\n avail_devices: available GPU devices.\n replica_grads: List of lists of (gradient, variable) tuples. The outer list\n is over replicas. 
The inner list is over individual gradients.\n\n Returns:\n The list of (aggregated_gradient, variable), where the gradient has been\n summed across all replicas and the variable is chosen from the first\n replica.\n \"\"\"\n # This only works for DGX-1 type of machine topology\n # Device peer to peer matrix\n # DMA: 0 1 2 3 4 5 6 7\n # 0: Y Y Y Y Y N N N\n # 1: Y Y Y Y N Y N N\n # 2: Y Y Y Y N N Y N\n # 3: Y Y Y Y N N N Y\n # 4: Y N N N Y Y Y Y\n # 5: N Y N N Y Y Y Y\n # 6: N N Y N Y Y Y Y\n # 7: N N N Y Y Y Y Y\n agg_grads = []\n num_devices = len(avail_devices)\n # In the special case of DGX-1 machine topology, the two groups have equal\n # size.\n group_size = num_devices // 2\n for i, single_grads in enumerate(zip(*replica_grads)):\n group_0_main_device = i % num_devices\n group_1_main_device = (group_0_main_device + group_size) % num_devices\n if group_0_main_device < group_size:\n group_0_begin = 0\n group_1_begin = group_size\n else:\n group_0_begin = group_size\n group_1_begin = 0\n\n # Aggregate the first group.\n group_0_device_grads = single_grads[group_0_begin:\n group_0_begin + group_size]\n with ops.device(avail_devices[group_0_main_device]):\n group_0_agg_grads, _ = aggregate_single_gradient_using_copy(\n group_0_device_grads, False, False)\n\n # Aggregate the second group.\n group_1_device_grads = single_grads[group_1_begin:\n group_1_begin + group_size]\n with ops.device(avail_devices[group_1_main_device]):\n group_1_agg_grads, _ = aggregate_single_gradient_using_copy(\n group_1_device_grads, False, False)\n\n # Aggregate between the groups.\n with ops.device(avail_devices[group_0_main_device]):\n (agg_total_grads, _), _ = aggregate_single_gradient_using_copy(\n [group_0_agg_grads, group_1_agg_grads], False, False)\n\n # Broadcast the result back into the root of each group.\n with ops.device(avail_devices[group_0_main_device]):\n group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)\n with ops.device(avail_devices[group_1_main_device]):\n group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)\n\n agg_grads_bcast = []\n for j in range(len(single_grads)):\n with ops.device(avail_devices[j]):\n # Broadcast the result back to each member in the group from the root.\n if (group_0_main_device < group_size) == (j < group_size):\n src_device_grad = group_0_agg_grads_bcast\n else:\n src_device_grad = group_1_agg_grads_bcast\n agg_grads_bcast.append(array_ops.identity(src_device_grad))\n\n agg_grads.append(\n [(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])\n\n agg_grads = list(zip(*agg_grads))\n\n return agg_grads\n\n\ndef aggregate_single_gradient_using_copy(grad_and_vars, use_mean,\n check_inf_nan):\n \"\"\"Calculate the average gradient for a shared variable across all replicas.\n\n Note that this function provides a synchronization point across all replicas.\n\n Args:\n grad_and_vars: A list or tuple of (gradient, variable) tuples. Each\n (gradient, variable) pair within the outer list represents the gradient\n of the variable calculated for a single replica, and the number of pairs\n equals the number of replicas.\n use_mean: if True, mean is taken, else sum of gradients is taken.\n check_inf_nan: check grads for nans and infs.\n\n Returns:\n The tuple ([(average_gradient, variable),], has_nan_or_inf) where the\n gradient has been averaged across all replicas. The variable is chosen\n from the first replica. 
The has_nan_or_inf indicates the grads has nan or\n inf.\n \"\"\"\n grads = [g for g, _ in grad_and_vars]\n grad = math_ops.add_n(grads)\n\n if use_mean and len(grads) > 1:\n grad = array_ops.multiply(grad, 1.0 / len(grads))\n\n v = grad_and_vars[0][1]\n if check_inf_nan:\n has_nan_or_inf = array_ops.logical_not(\n array_ops.reduce_all(array_ops.is_finite(grads)))\n return (grad, v), has_nan_or_inf\n else:\n return (grad, v), None\n\n\ndef group_device_names(devices, group_size):\n \"\"\"Group device names into groups of group_size.\n\n Args:\n devices: a list of canonical device strings.\n group_size: integer which is equal to or greater than 1.\n\n Returns:\n list of lists of devices, where each inner list is group_size long,\n and each device appears at least once in an inner list. If\n len(devices) % group_size == 0 then each device will appear exactly once.\n\n Raises:\n ValueError: if group_size > len(devices)\n \"\"\"\n num_devices = len(devices)\n if group_size > num_devices:\n raise ValueError(\n 'only %d devices, but group_size=%d' % (num_devices, group_size))\n num_groups = (\n num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))\n groups = [[] for i in range(num_groups)]\n for i in range(num_groups * group_size):\n groups[i % num_groups].append(devices[i % num_devices])\n return groups\n\n\ndef split_grads_by_size(threshold_size, device_grads):\n \"\"\"Break gradients into two sets according to tensor size.\n\n Args:\n threshold_size: int size cutoff for small vs large tensor.\n device_grads: List of lists of (gradient, variable) tuples. The outer\n list is over devices. The inner list is over individual gradients.\n\n Returns:\n small_grads: Subset of device_grads where shape is <= threshold_size\n elements.\n large_grads: Subset of device_grads where shape is > threshold_size\n elements.\n \"\"\"\n small_grads = []\n large_grads = []\n for dl in device_grads:\n small_dl = []\n large_dl = []\n for (g, v) in dl:\n tensor_size = g.get_shape().num_elements()\n if tensor_size <= threshold_size:\n small_dl.append([g, v])\n else:\n large_dl.append([g, v])\n if small_dl:\n small_grads.append(small_dl)\n if large_dl:\n large_grads.append(large_dl)\n return small_grads, large_grads\n\n\n# threading.Lock() and threading.local() cannot be pickled and therefore cannot\n# be a field of CollectiveKeys. Right now _thread_local is not necessary to be\n# an instance member of CollectiveKeys since we always create a new thread for\n# each replica.\n_lock = threading.Lock()\n_thread_local = threading.local()\n\n\n# TODO(yuefengz): use random key starts to avoid reusing keys?\nclass CollectiveKeys(object):\n \"\"\"Class that manages collective keys.\n\n We need to manage three different keys for collective:\n\n *Group key*: an integer key to identify the set of cooperative devices.\n Collective ops work under the same set of devices must using the same group\n key.\n\n *Instance key*: an integer key to identify the set of same counterpart of\n tensors on different devices in a device group that need to be all-reduced.\n\n \"Graph key\": an integer key that is unique key graph. This is used to support\n multiple graphs per client session. 
It must be non-zero and set in the\n `config` argument of each call to `session.run`.\n \"\"\"\n\n def __init__(self,\n group_key_start=1,\n instance_key_start=100,\n instance_key_with_id_start=10000):\n \"\"\"Initializes the object.\n\n Args:\n group_key_start: the starting integer of group key.\n instance_key_start: the starting integer of instance key.\n instance_key_with_id_start: the starting integer of instance key that is\n recorded with an id.\n \"\"\"\n self._group_key = group_key_start\n self._group_key_table = dict()\n\n # For instance keys with ids\n self._instance_key_id_to_key_table = dict()\n self._instance_key_with_id_counter = instance_key_with_id_start\n\n # For instance keys without ids\n self._instance_key_start = instance_key_start\n\n def _get_thread_local_object(self):\n # We make instance key without key ids thread local so that it will work\n # with MirroredStrategy and distribute coordinator.\n if not hasattr(_thread_local, 'instance_key'):\n _thread_local.instance_key = self._instance_key_start\n return _thread_local\n\n def get_group_key(self, devices):\n \"\"\"Returns a group key for the set of devices.\n\n Args:\n devices: list of strings naming devices in a collective group.\n\n Returns:\n int key uniquely identifying the set of device names.\n \"\"\"\n parsed = [pydev.DeviceSpec.from_string(d) for d in devices]\n # In the between-graph replicated training, different workers need to get\n # the same device key. So we remove the task_type and task_id from the\n # devices.\n # TODO(yuefengz): in the in-graph replicated training, we need to include\n # task_type and task_id.\n names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])\n key_id = ','.join(names)\n with _lock:\n if key_id not in self._group_key_table:\n new_key = self._group_key\n self._group_key += 1\n self._group_key_table[key_id] = new_key\n return self._group_key_table[key_id]\n\n def get_instance_key(self, key_id=None):\n \"\"\"Returns a new instance key for use in defining a collective op.\n\n Args:\n key_id: optional string. If set, key will be recorded and the same key\n will be returned when the same key_id is provided. If not, an increasing\n instance key will be returned.\n \"\"\"\n if key_id:\n with _lock:\n if key_id not in self._instance_key_id_to_key_table:\n self._instance_key_with_id_counter += 1\n self._instance_key_id_to_key_table[key_id] = (\n self._instance_key_with_id_counter)\n return self._instance_key_id_to_key_table[key_id]\n else:\n v = self._get_thread_local_object().instance_key\n self._get_thread_local_object().instance_key += 1\n return v\n\n\ndef build_collective_reduce(input_tensors,\n num_workers,\n collective_keys,\n reduction_op='Add',\n unary_op='Id'):\n \"\"\"Build a subgraph that does one full all-reduce, using the collective Op.\n\n Args:\n input_tensors: tensors within a single worker graph that are to be reduced\n together; must be one per device.\n num_workers: total number of workers with identical independent graphs that\n will be doing this same reduction. 
The reduction will actually include\n the corresponding tensors at all these workers.\n collective_keys: a CollectiveKeys object.\n reduction_op: string naming the reduction op.\n unary_op: string naming the unary final op.\n\n Returns:\n An array of final tensors, one per device, computed by the full reduction.\n\n Raises:\n ValueError: There must be at least two tensors over all the workers.\n \"\"\"\n group_size = len(input_tensors) * num_workers\n if group_size < 2:\n raise ValueError('num_workers * len(input_tensors) must be 2 or greater')\n devices = [t.device for t in input_tensors]\n num_devices = len(devices)\n group_key = collective_keys.get_group_key(devices)\n instance_key = collective_keys.get_instance_key()\n out_tensors = []\n subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec\n for d in range(num_devices):\n with ops.device(devices[d]):\n reduce_op = collective_ops.all_reduce(\n input_tensors[d], group_size, group_key, instance_key, reduction_op,\n unary_op, subdiv_offsets)\n out_tensors.append(reduce_op)\n return out_tensors\n\n\ndef sum_grad_and_var_all_reduce(grad_and_vars,\n num_workers,\n alg,\n gpu_indices,\n aux_devices=None,\n num_shards=1):\n \"\"\"Apply all-reduce algorithm over specified gradient tensors.\"\"\"\n with ops.name_scope('allreduce'):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n scaled_grads = [g for g, _ in grad_and_vars]\n if alg == 'nccl':\n summed_grads = nccl_ops.all_sum(scaled_grads)\n elif alg == 'xring':\n summed_grads = all_reduce.build_ring_all_reduce(\n scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)\n elif alg == 'nccl/xring':\n summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,\n math_ops.add)\n elif alg == 'nccl/rechd':\n summed_grads = all_reduce.build_nccl_then_recursive_hd(\n scaled_grads, math_ops.add)\n elif alg == 'nccl/pscpu':\n summed_grads = all_reduce.build_nccl_then_shuffle(\n scaled_grads, aux_devices, math_ops.add, math_ops.add_n)\n elif alg == 'pscpu/pscpu':\n second_gather_devices = aux_devices[:num_shards]\n summed_grads = all_reduce.build_shuffle_then_shuffle(\n scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)\n elif alg in ['pscpu', 'psgpu']:\n summed_grads = all_reduce.build_shuffle_all_reduce(\n scaled_grads, aux_devices, math_ops.add_n)\n else:\n raise ValueError('unsupported all_reduce alg: ', alg)\n\n result = []\n for (_, v), g in zip(grad_and_vars, summed_grads):\n result.append([g, v])\n return result\n\n\ndef sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,\n num_shards, gpu_indices):\n \"\"\"Apply all-reduce algorithm over specified gradient tensors.\n\n Args:\n dev_prefixes: list of prefix strings to use to generate PS device names.\n replica_grads: the gradients to reduce.\n num_workers: number of worker processes across entire job.\n alg: the all-reduce algorithm to apply.\n num_shards: alg-specific sharding factor.\n gpu_indices: indices of local GPUs in order usable for ring-reduce.\n\n Returns:\n list of reduced tensors\n \"\"\"\n alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])\n is_hierarchical = '/' in alg\n if 'pscpu' in alg:\n aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]\n elif 'psgpu' in alg:\n aux_devices = [\n prefix + '/gpu:%d' % i\n for i in range(len(gpu_indices))\n for prefix in dev_prefixes\n ]\n else:\n aux_devices = ['/job:localhost/cpu:0']\n # Auxiliary devices for hierarchical 
all-reduces.\n aux_device_groups = group_device_names(\n aux_devices, num_shards if alg_contains_shuffle else 1)\n group_index = 0\n reduced_gv_list = []\n for grad_and_vars in zip(*replica_grads):\n reduced_gv_list.append(\n sum_grad_and_var_all_reduce(\n grad_and_vars, num_workers, alg, gpu_indices, aux_devices\n if is_hierarchical else aux_device_groups[group_index], num_shards))\n group_index = (group_index + 1) % len(aux_device_groups)\n new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]\n return new_replica_grads\n\n\ndef extract_ranges(index_list, range_size_limit=32):\n \"\"\"Extract consecutive ranges and singles from index_list.\n\n Args:\n index_list: List of monotone increasing non-negative integers.\n range_size_limit: Largest size range to return. If a larger\n consecutive range exists, it will be returned as multiple\n ranges.\n\n Returns:\n (ranges, singles) where ranges is a list of [first, last] pairs of\n consecutive elements in index_list, and singles is all of the\n other elements, in original order.\n \"\"\"\n if not index_list:\n return [], []\n first = index_list[0]\n last = first\n ranges = []\n singles = []\n for i in index_list[1:]:\n if i == last + 1 and (last - first) <= range_size_limit:\n last = i\n else:\n if last > first:\n ranges.append([first, last])\n else:\n singles.append(first)\n first = i\n last = i\n if last > first:\n ranges.append([first, last])\n else:\n singles.append(first)\n return ranges, singles\n\n\nGradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')\n\n\ndef pack_range(key, packing, grad_vars, rng):\n \"\"\"Form the concatenation of a specified range of gradient tensors.\n\n Args:\n key: Value under which to store meta-data in packing that will be used\n later to restore the grad_var list structure.\n packing: Dict holding data describing packed ranges of small tensors.\n grad_vars: List of (grad, var) pairs for one replica.\n rng: A pair of integers giving the first, last indices of a consecutive\n range of tensors to be packed.\n\n Returns:\n A tensor that is the concatenation of all the specified small tensors.\n \"\"\"\n to_pack = grad_vars[rng[0]:rng[1] + 1]\n members = []\n variables = []\n restore_shapes = []\n with ops.name_scope('pack'):\n for g, v in to_pack:\n variables.append(v)\n restore_shapes.append(g.shape)\n with ops.device(g.device):\n members.append(array_ops.reshape(g, [-1]))\n packing[key] = GradPackTuple(\n indices=range(rng[0], rng[1] + 1),\n vars=variables,\n shapes=restore_shapes)\n with ops.device(members[0].device):\n return array_ops.concat(members, 0)\n\n\ndef unpack_grad_tuple(gv, gpt):\n \"\"\"Unpack a previously packed collection of gradient tensors.\n\n Args:\n gv: A (grad, var) pair to be unpacked.\n gpt: A GradPackTuple describing the packing operation that produced gv.\n\n Returns:\n A list of (grad, var) pairs corresponding to the values that were\n originally packed into gv, maybe following subsequent operations like\n reduction.\n \"\"\"\n elt_widths = [x.num_elements() for x in gpt.shapes]\n with ops.device(gv[0][0].device):\n with ops.name_scope('unpack'):\n splits = array_ops.split(gv[0], elt_widths)\n unpacked_gv = []\n for idx, s in enumerate(splits):\n unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),\n gpt.vars[idx]))\n return unpacked_gv\n\n\ndef pack_small_tensors(replica_grads, max_bytes=0, max_group=0):\n \"\"\"Concatenate small gradient tensors together for reduction.\n\n Args:\n replica_grads: List of lists of (gradient, variable) tuples.\n 
max_bytes: Int giving max number of bytes in a tensor that\n may be considered small.\n max_group: Int giving max number of small tensors that may be\n concatenated into one new tensor.\n\n Returns:\n new_replica_grads, packing where new_replica_grads is identical to\n replica_grads except that all feasible small_tensors have been removed\n from their places and concatenated into larger tensors that are\n now in the front of the list for each replica, and packing contains\n the data necessary to restore the replica_grads structure.\n\n Look through the first replica for gradients of the same type (float),\n and small size, that are all sequential. For each such group,\n replace by a new tensor that is a flattened concatenation. Note\n that the corresponding variable will be absent, which doesn't matter\n because it isn't used during all-reduce.\n\n Requires:\n Every gv_list in replicas must have isomorphic structure including identical\n tensor sizes and types.\n \"\"\"\n small_indices = []\n large_indices = []\n for idx, (g, _) in enumerate(replica_grads[0]):\n if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:\n small_indices.append(idx)\n else:\n large_indices.append(idx)\n small_ranges, small_singles = extract_ranges(\n small_indices, range_size_limit=max_group)\n large_indices = sorted(large_indices + small_singles)\n num_gv = len(replica_grads[0])\n packing = {}\n if small_ranges:\n new_replica_grads = []\n for dev_idx, gv_list in enumerate(replica_grads):\n assert len(gv_list) == num_gv\n new_gv_list = []\n for r in small_ranges:\n key = '%d:%d' % (dev_idx, len(new_gv_list))\n new_gv_list.append((pack_range(key, packing, gv_list, r),\n 'packing_var_placeholder'))\n for i in large_indices:\n new_gv_list.append(gv_list[i])\n new_replica_grads.append(new_gv_list)\n return new_replica_grads, packing\n else:\n return replica_grads, None\n\n\ndef unpack_small_tensors(replica_grads, packing):\n \"\"\"Undo the structure alterations to replica_grads done by pack_small_tensors.\n\n Args:\n replica_grads: List of List of (grad, var) tuples.\n packing: A dict generated by pack_small_tensors describing the changes\n it made to replica_grads.\n\n Returns:\n new_replica_grads: identical to replica_grads except that concatenations\n of small tensors have been split apart and returned to their original\n positions, paired with their original variables.\n \"\"\"\n if not packing:\n return replica_grads\n new_replica_grads = []\n num_devices = len(replica_grads)\n num_packed = len(packing.keys()) // num_devices\n for dev_idx, gv_list in enumerate(replica_grads):\n gv_list = list(gv_list)\n new_gv_list = gv_list[num_packed:]\n for i in range(num_packed):\n k = '%d:%d' % (dev_idx, i)\n gpt = packing[k]\n gv = unpack_grad_tuple(gv_list[i], gpt)\n for gi, idx in enumerate(gpt.indices):\n assert idx == gpt.indices[gi]\n new_gv_list.insert(idx, gv[gi])\n new_replica_grads.append(new_gv_list)\n return new_replica_grads\n\n\ndef aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):\n \"\"\"Aggregate tensors using `accumulation_fn` and IndexedSlices via concat.\"\"\"\n if any(isinstance(v, ops.IndexedSlices) for v in values):\n return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access\n else:\n return accumulation_fn(values)\n\n\ndef divide_by_n_tensors_or_indexed_slices(value, n):\n if isinstance(value, ops.IndexedSlices):\n value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access\n 
return ops.IndexedSlices(\n value.values / n, value.indices, value.dense_shape)\n else:\n return value / n\n\n\ndef copy_tensor_or_indexed_slices_to_device(value, device):\n with ops.device(device):\n if isinstance(value, ops.IndexedSlices):\n copied_values = array_ops.identity(value.values)\n copied_indices = array_ops.identity(value.indices)\n copied_shape = array_ops.identity(value.dense_shape)\n result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)\n else:\n result = array_ops.identity(value)\n return result\n\n\ndef contains_indexed_slices(value):\n \"\"\"Check whether the value is `IndexedSlices` or contains `IndexedSlices`.\"\"\"\n if isinstance(value, ops.IndexedSlices):\n return True\n elif isinstance(value, (list, tuple)) and value:\n return any(contains_indexed_slices(v) for v in value)\n elif isinstance(value, value_lib.DistributedValues):\n return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access\n else:\n return False\n",
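A NumPy sketch of the pack/unpack round trip that pack_small_tensors() and unpack_small_tensors() above implement for real gradients: flatten and concatenate small tensors so a single reduction handles them, then split by element count and restore the recorded shapes.

import numpy as np

grads = [np.ones((2, 3)), np.arange(4.0).reshape(2, 2)]
shapes = [g.shape for g in grads]

# Pack: flatten each gradient and concatenate (cf. pack_range above).
packed = np.concatenate([g.reshape(-1) for g in grads])  # shape (10,)

# ... a single all-reduce would run over `packed` here ...

# Unpack: split by element counts, restore shapes (cf. unpack_grad_tuple).
widths = [int(np.prod(s)) for s in shapes]
parts = np.split(packed, np.cumsum(widths)[:-1])
unpacked = [p.reshape(s) for p, s in zip(parts, shapes)]
assert all(np.array_equal(a, b) for a, b in zip(grads, unpacked))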
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for ragged.map_inner_values.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import ragged\nfrom tensorflow.python.platform import googletest\n\n\nclass RaggedMapInnerValuesOpTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def assertRaggedMapInnerValuesReturns(self,\n op,\n expected,\n args=(),\n kwargs=None):\n kwargs = kwargs or {}\n result = ragged.map_inner_values(op, *args, **kwargs)\n with self.test_session():\n self.assertEqual(result.eval().tolist(), expected)\n\n def testDocStringExamples(self):\n \"\"\"Test the examples in apply_op_to_ragged_values.__doc__.\"\"\"\n rt = ragged.constant([[1, 2, 3], [], [4, 5], [6]])\n v1 = ragged.map_inner_values(array_ops.ones_like, rt)\n v2 = ragged.map_inner_values(math_ops.multiply, rt, rt)\n v3 = ragged.map_inner_values(math_ops.add, rt, 5)\n with self.test_session():\n self.assertEqual(v1.eval().tolist(), [[1, 1, 1], [], [1, 1], [1]])\n self.assertEqual(v2.eval().tolist(), [[1, 4, 9], [], [16, 25], [36]])\n self.assertEqual(v3.eval().tolist(), [[6, 7, 8], [], [9, 10], [11]])\n\n def testOpWithSingleRaggedTensorArg(self):\n tensor = ragged.constant([[1, 2, 3], [], [4, 5]])\n self.assertRaggedMapInnerValuesReturns(\n op=array_ops.zeros_like,\n args=(tensor,),\n expected=[[0, 0, 0], [], [0, 0]])\n\n def testOpWithTwoRaggedTensorArgs(self):\n x = ragged.constant([[3, 1, 4], [], [1, 5]])\n y = ragged.constant([[1, 2, 3], [], [4, 5]])\n self.assertRaggedMapInnerValuesReturns(\n op=math_ops.multiply, args=(x, y), expected=[[3, 2, 12], [], [4, 25]])\n\n def testOpWithRaggedTensorAndScalarArgs(self):\n y = ragged.constant([[1, 2, 3], [], [4, 5]])\n self.assertRaggedMapInnerValuesReturns(\n op=math_ops.multiply, args=(5, y), expected=[[5, 10, 15], [], [20, 25]])\n\n def testOpWithThreeRaggedTensorArgs(self):\n condition = ragged.constant(\n [[True, True, False], [], [True, False]]) # pyformat: disable\n x = ragged.constant([['a', 'b', 'c'], [], ['d', 'e']])\n y = ragged.constant([['A', 'B', 'C'], [], ['D', 'E']])\n self.assertRaggedMapInnerValuesReturns(\n op=array_ops.where,\n args=(condition, x, y),\n expected=[[b'a', b'b', b'C'], [], [b'd', b'E']])\n\n def testOpWithRaggedTensorListArg(self):\n x = ragged.constant([[1, 2, 3], [], [4, 5]])\n y = ragged.constant([[10, 20, 30], [], [40, 50]])\n self.assertRaggedMapInnerValuesReturns(\n op=math_ops.add_n,\n args=([x, y, x],),\n expected=[[12, 24, 36], [], 
[48, 60]])\n\n def testOpWithKeywordArgs(self):\n x = ragged.constant([[3, 1, 4], [], [1, 5]])\n y = ragged.constant([[1, 2, 3], [], [4, 5]])\n self.assertRaggedMapInnerValuesReturns(\n op=math_ops.multiply,\n kwargs=dict(x=x, y=y),\n expected=[[3, 2, 12], [], [4, 25]])\n\n def testOpWithMixedPositionalAndKeywordArgs(self):\n x = ragged.constant([[3, 1, 4], [], [1, 5]])\n y = ragged.constant([[1, 2, 3], [], [4, 5]])\n self.assertRaggedMapInnerValuesReturns(\n op=math_ops.multiply,\n args=(x,),\n kwargs=dict(y=y),\n expected=[[3, 2, 12], [], [4, 25]])\n\n def testNonElementWiseOp(self):\n x = ragged.constant(\n [[[3, 1, 4], [1, 5, 9], [2, 6, 5]], [], [[3, 5, 8], [9, 7, 9]]],\n ragged_rank=1)\n self.assertRaggedMapInnerValuesReturns(\n op=math_ops.reduce_sum,\n kwargs={\n 'input_tensor': x,\n 'axis': 1,\n },\n expected=[[8, 15, 13], [], [16, 25]])\n\n def testOpWithRaggedRankGreaterThanOne(self):\n # ragged_rank=0\n x0 = [3, 1, 4, 1, 5, 9, 2, 6, 5]\n y0 = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n with self.test_session():\n self.assertEqual(\n math_ops.multiply(x0, y0).eval().tolist(),\n [3, 2, 12, 4, 25, 54, 14, 48, 45])\n\n # ragged_rank=1\n x1 = ragged.constant([[3, 1, 4], [], [1, 5], [9, 2], [6, 5]])\n y1 = ragged.constant([[1, 2, 3], [], [4, 5], [6, 7], [8, 9]])\n self.assertRaggedMapInnerValuesReturns(\n op=math_ops.multiply,\n args=(x1, y1),\n expected=[[3, 2, 12], [], [4, 25], [54, 14], [48, 45]])\n\n # ragged_rank=2\n x2 = ragged.constant([[[3, 1, 4]], [], [[], [1, 5]], [[9, 2], [6, 5]]])\n y2 = ragged.constant([[[1, 2, 3]], [], [[], [4, 5]], [[6, 7], [8, 9]]])\n self.assertRaggedMapInnerValuesReturns(\n op=math_ops.multiply,\n args=(x2, y2),\n expected=[[[3, 2, 12]], # row 0\n [], # row 1\n [[], [4, 25]], # row 2\n [[54, 14], [48, 45]] # row 3\n ]) # pyformat: disable\n\n # ragged_rank=3\n x3 = ragged.constant([[[[3, 1, 4]], []], [], [[[], [1, 5]]],\n [[[9, 2], [6, 5]]]])\n y3 = ragged.constant([[[[1, 2, 3]], []], [], [[[], [4, 5]]],\n [[[6, 7], [8, 9]]]])\n self.assertRaggedMapInnerValuesReturns(\n op=math_ops.multiply,\n args=(x3, y3),\n expected=[\n [[[3, 2, 12]], []], # row 0\n [], # row 1\n [[[], [4, 25]]], # row 2\n [[[54, 14], [48, 45]]] # row 3\n ]) # pyformat: disable\n\n def testOpWithRaggedRankThree(self):\n x = ragged.constant([[[3, 1, 4]], [], [[], [1, 5]]])\n y = ragged.constant([[[1, 2, 3]], [], [[], [4, 5]]])\n self.assertRaggedMapInnerValuesReturns(\n op=math_ops.multiply,\n args=(x, y),\n expected=[[[3, 2, 12]], [], [[], [4, 25]]])\n\n def testOpWithInnerValuesOnly(self):\n x = constant_op.constant([[1, 2], [3, 4], [5, 6]])\n y = constant_op.constant(2)\n self.assertRaggedMapInnerValuesReturns(\n op=math_ops.multiply, args=(x, y), expected=[[2, 4], [6, 8], [10, 12]])\n\n def testRaggedTensorSplitsRaggedRankMismatchError(self):\n x = ragged.constant([[3, 1, 4], [], [1, 5]])\n y = ragged.constant([[[3, 1, 4], []], [], [[1, 5]]])\n self.assertRaisesRegexp(ValueError,\n r'Inputs must have identical ragged splits.*',\n ragged.map_inner_values, math_ops.add, x, y)\n\n def testRaggedTensorSplitsValueMismatchError(self):\n x = ragged.constant([[3, 1, 4], [], [1, 5]])\n y = ragged.constant([[1], [2, 3], [4, 5]])\n self.assertRaisesRegexp(errors.InvalidArgumentError,\n r'Inputs must have identical ragged splits.*',\n ragged.map_inner_values, math_ops.add, x, y)\n\n def testRaggedTensorSplitsMismatchErrorAtRuntime(self):\n splits1 = array_ops.placeholder_with_default(\n constant_op.constant([0, 3, 3, 5], dtypes.int64), None)\n splits2 = array_ops.placeholder_with_default(\n 
constant_op.constant([0, 1, 3, 5], dtypes.int64), None)\n x = ragged.from_row_splits([3, 1, 4, 1, 5], splits1)\n y = ragged.from_row_splits([1, 2, 3, 4, 5], splits2)\n result = ragged.map_inner_values(math_ops.add, x, y)\n with self.test_session():\n self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[Inputs must have identical ragged splits\\] '\n r'\\[Condition x == y did not hold element-wise:\\].*', result.eval)\n\n\nif __name__ == '__main__':\n googletest.main()\n",
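The tests above hinge on a single invariant: a ragged tensor is a flat values array plus row_splits, so an elementwise op can be applied to the flat values alone and the original splits reused unchanged. A NumPy sketch:

import numpy as np

values = np.array([1, 2, 3, 4, 5])
row_splits = np.array([0, 3, 3, 5])  # rows: [1, 2, 3], [], [4, 5]

mapped = values * values  # the op touches only the flat values

# Reassemble rows with the unchanged splits.
rows = [mapped[row_splits[i]:row_splits[i + 1]].tolist()
        for i in range(len(row_splits) - 1)]
assert rows == [[1, 4, 9], [], [16, 25]]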
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.ragged.batch_gather.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import ragged\nfrom tensorflow.python.platform import googletest\n\n\nclass RaggedBatchGatherOpTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n @parameterized.parameters([\n #=========================================================================\n # Docstring Example\n #=========================================================================\n dict(\n descr='Docstring example',\n params=ragged.constant_value([['a', 'b', 'c'], ['d'], [], ['e']]),\n indices=ragged.constant_value([[1, 2, 0], [], [], [0, 0]]),\n expected=ragged.constant_value([[b'b', b'c', b'a'], [], [],\n [b'e', b'e']])),\n #=========================================================================\n # 0 Batch Dimensions\n #=========================================================================\n dict(\n descr='params: [P1], indices: [I], result: [I]',\n params=['a', 'b', 'c', 'd'],\n indices=[3, 2],\n expected=[b'd', b'c']),\n dict(\n descr='params: [P1, (P2)], indices: [I], result: [I, (P2)]',\n params=ragged.constant_value([['a', 'b'], [], ['c'], ['d', 'e']]),\n indices=[3, 2],\n expected=ragged.constant_value([[b'd', b'e'], [b'c']])),\n #=========================================================================\n # 1 Batch Dimension\n #=========================================================================\n dict(\n descr='params: [B1, P1], indices: [B1, I], result: [B1, I]',\n params=[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']],\n indices=[[2, 0], [0, 1], [1, 0]],\n expected=[[b'c', b'a'], [b'd', b'e'], [b'h', b'g']]),\n dict(\n descr='params: [B1, (P1)], indices: [B1, I], result: [B1, I]',\n params=ragged.constant_value([['a', 'b', 'c'], ['d', 'e'], ['g']]),\n indices=[[2, 0], [0, 1], [0, 0]],\n expected=[[b'c', b'a'], [b'd', b'e'], [b'g', b'g']]),\n dict(\n descr='params: [B1, P1], indices: [B1, (I)], result: [B1, (I)]',\n params=[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']],\n indices=ragged.constant_value([[2, 0, 2], [0], [1]]),\n expected=ragged.constant_value([[b'c', b'a', b'c'], [b'd'], [b'h']])),\n dict(\n descr=('params: [B1, (P1), (P2), P3], indices: [B1, I], '\n 'result: [B1, I, (P2), P3]'),\n params=ragged.constant_value(\n [[[['a']], [['b'], ['c']]], [[['d'], ['e']], [['f']]], [[['g']]]],\n ragged_rank=2),\n indices=[[1, 0], [0, 1], [0, 0]],\n expected=ragged.constant_value(\n [[[[b'b'], [b'c']], [[b'a']]], [[[b'd'], [b'e']], [[b'f']]],\n [[[b'g']], [[b'g']]]],\n 
ragged_rank=2)),\n #=========================================================================\n # 2 Batch Dimensions\n #=========================================================================\n dict(\n descr=('params: [B1, B2, P1], indices: [B1, B2, I], '\n 'result: [B1, B2, I]'),\n params=[[['a', 'b', 'c']], [['d', 'e', 'f']], [['g', 'h', 'i']]],\n indices=[[[2, 0]], [[0, 1]], [[1, 0]]],\n expected=[[[b'c', b'a']], [[b'd', b'e']], [[b'h', b'g']]]),\n dict(\n descr=('params: [B1, (B2), P1], indices: [B1, (B2), I], '\n 'result: [B1, (B2), I]'),\n params=ragged.constant_value(\n [[['a', 'b', 'c'], ['d', 'e', 'f']], [['g', 'h', 'i']]],\n ragged_rank=1),\n indices=ragged.constant_value([[[2, 0], [0, 1]], [[1, 0]]],\n ragged_rank=1),\n expected=ragged.constant_value(\n [[[b'c', b'a'], [b'd', b'e']], [[b'h', b'g']]], ragged_rank=1)),\n dict(\n descr=('params: [B1, (B2), (P1)], indices: [B1, (B2), I], '\n 'result: [B1, (B2), I]'),\n params=ragged.constant_value([[['a', 'b', 'c'], ['d']], [['e', 'f']]],\n ragged_rank=2),\n indices=ragged.constant_value([[[2, 0], [0, 0]], [[1, 0]]],\n ragged_rank=1),\n expected=ragged.constant_value(\n [[[b'c', b'a'], [b'd', b'd']], [[b'f', b'e']]], ragged_rank=1)),\n dict(\n descr=('params: [B1, (B2), P1], indices: [B1, (B2), (I)], '\n 'result: [B1, (B2), (I)]'),\n params=ragged.constant_value(\n [[['a', 'b', 'c'], ['d', 'e', 'f']], [['g', 'h', 'i']]],\n ragged_rank=1),\n indices=ragged.constant_value([[[2, 1, 0], [0]], [[1, 1]]],\n ragged_rank=2),\n expected=ragged.constant_value(\n [[[b'c', b'b', b'a'], [b'd']], [[b'h', b'h']]], ragged_rank=2)),\n #=========================================================================\n # 3 Batch Dimensions\n #=========================================================================\n dict(\n descr=(\n 'params: [B1, (B2), (B3), (P1)], indices: [B1, (B2), (B3), I], '\n 'result: [B1, (B2), (B3), I]'),\n params=ragged.constant_value(\n [[[['a', 'b', 'c'], ['d']], [['e', 'f']]]], ragged_rank=3),\n indices=ragged.constant_value([[[[2, 0], [0, 0]], [[1, 0]]]],\n ragged_rank=2),\n expected=ragged.constant_value(\n [[[[b'c', b'a'], [b'd', b'd']], [[b'f', b'e']]]], ragged_rank=2)),\n ])\n def testRaggedBatchGather(self, descr, params, indices, expected):\n result = ragged.batch_gather(params, indices)\n self.assertEqual(\n getattr(result, 'ragged_rank', 0), getattr(expected, 'ragged_rank', 0))\n with self.test_session():\n if hasattr(expected, 'tolist'):\n expected = expected.tolist()\n self.assertEqual(result.eval().tolist(), expected)\n\n def testRaggedBatchGatherUnknownRankError(self):\n params = [['a', 'b'], ['c', 'd']]\n indices = array_ops.placeholder(dtypes.int32, shape=None)\n ragged_indices = ragged.from_row_splits(indices, [0, 2, 4])\n\n with self.assertRaisesRegexp(\n ValueError, 'batch_gather does not allow indices with unknown shape.'):\n ragged.batch_gather(params, indices)\n\n with self.assertRaisesRegexp(\n ValueError, 'batch_gather does not allow indices with unknown shape.'):\n ragged.batch_gather(params, ragged_indices)\n\n @parameterized.parameters([\n dict(\n params=ragged.constant([['a'], ['b'], ['c']]),\n indices=ragged.constant([[0], [0]]),\n message='Dimensions 3 and 2 are not compatible'),\n dict(\n params=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\n indices=ragged.constant([[[0, 0], [0, 0, 0]], [[0]]]),\n message='batch shape from indices does not match params shape'),\n dict(\n params=ragged.constant([[[0, 0], [0, 0, 0]], [[0]]]),\n indices=ragged.constant([[[0, 0]], [[0, 0, 0]], [[0]]]),\n message='Dimensions 
must be equal, but are 3 and 4'),\n dict(\n params=ragged.constant([[[0, 0], [0, 0, 0]], [[0]], [[0]]]),\n indices=ragged.constant([[[0, 0]], [[0, 0, 0]], [[0]]]),\n error=errors.InvalidArgumentError,\n message='Condition x == y did not hold element-wise'),\n dict(\n params=ragged.constant(['a', 'b', 'c']),\n indices=ragged.constant([[0], [0]]),\n message='batch shape from indices does not match params shape'),\n dict(params=ragged.constant_value([['a']]),\n indices=0,\n message='indices.rank must be at least 1.'),\n dict(params=ragged.constant_value([['a']]),\n indices=[[[0]]],\n message='batch shape from indices does not match params shape'),\n ])\n def testRaggedBatchGatherStaticError(self,\n params,\n indices,\n message,\n error=ValueError):\n with self.assertRaisesRegexp(error, message):\n ragged.batch_gather(params, indices)\n\n\nif __name__ == '__main__':\n googletest.main()\n",
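A NumPy sketch of the batch_gather semantics exercised above for one ragged batch dimension: each row of indices selects positions within the corresponding params row, so offsets are relative to that row's start in the flat values.

import numpy as np

values = np.array(['a', 'b', 'c', 'd', 'e'])
row_splits = np.array([0, 3, 5])  # rows: [a, b, c], [d, e]
indices = [[2, 0], [0, 1]]        # per-row gather indices

gathered = [values[row_splits[r]:row_splits[r + 1]][np.array(idx)].tolist()
            for r, idx in enumerate(indices)]
assert gathered == [['c', 'a'], ['d', 'e']]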
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for mfcc_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import spectral_ops_test_util\nfrom tensorflow.python.ops.signal import mfcc_ops\nfrom tensorflow.python.platform import test\n\n\n# TODO(rjryan): We have no open source tests for MFCCs at the moment. Internally\n# at Google, this code is tested against a reference implementation that follows\n# HTK conventions.\nclass MFCCTest(test.TestCase):\n\n def test_error(self):\n # num_mel_bins must be positive.\n with self.assertRaises(ValueError):\n signal = array_ops.zeros((2, 3, 0))\n mfcc_ops.mfccs_from_log_mel_spectrograms(signal)\n\n # signal must be float32\n with self.assertRaises(ValueError):\n signal = array_ops.zeros((2, 3, 5), dtype=dtypes.float64)\n mfcc_ops.mfccs_from_log_mel_spectrograms(signal)\n\n def test_basic(self):\n \"\"\"A basic test that the op runs on random input.\"\"\"\n with spectral_ops_test_util.fft_kernel_label_map():\n with self.session(use_gpu=True):\n signal = random_ops.random_normal((2, 3, 5))\n mfcc_ops.mfccs_from_log_mel_spectrograms(signal).eval()\n\n def test_unknown_shape(self):\n \"\"\"A test that the op runs when shape and rank are unknown.\"\"\"\n with spectral_ops_test_util.fft_kernel_label_map():\n with self.session(use_gpu=True):\n signal = array_ops.placeholder_with_default(\n random_ops.random_normal((2, 3, 5)), tensor_shape.TensorShape(None))\n self.assertIsNone(signal.shape.ndims)\n mfcc_ops.mfccs_from_log_mel_spectrograms(signal).eval()\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for CandidateSamplerOp.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import candidate_sampling_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass RangeSamplerOpsTest(test.TestCase):\n\n BATCH_SIZE = 3\n NUM_TRUE = 2\n RANGE = 5\n NUM_SAMPLED = RANGE\n\n TRUE_LABELS = [[1, 2], [0, 4], [3, 3]]\n\n def testTrueCandidates(self):\n with self.cached_session() as sess:\n indices = constant_op.constant([0, 0, 1, 1, 2, 2])\n true_candidates_vec = constant_op.constant([1, 2, 0, 4, 3, 3])\n true_candidates_matrix = array_ops.reshape(\n true_candidates_vec, [self.BATCH_SIZE, self.NUM_TRUE])\n indices_val, true_candidates_val = sess.run(\n [indices, true_candidates_matrix])\n\n self.assertAllEqual(indices_val, [0, 0, 1, 1, 2, 2])\n self.assertAllEqual(true_candidates_val, self.TRUE_LABELS)\n\n def testSampledCandidates(self):\n with self.cached_session():\n true_classes = constant_op.constant(\n [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)\n sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(\n true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)\n result = self.evaluate(sampled_candidates)\n\n expected_ids = [0, 1, 2, 3, 4]\n self.assertAllEqual(result, expected_ids)\n self.assertEqual(sampled_candidates.get_shape(), [self.NUM_SAMPLED])\n\n def testTrueLogExpectedCount(self):\n with self.cached_session():\n true_classes = constant_op.constant(\n [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)\n _, true_expected_count, _ = candidate_sampling_ops.all_candidate_sampler(\n true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)\n true_log_expected_count = math_ops.log(true_expected_count)\n result = self.evaluate(true_log_expected_count)\n\n self.assertAllEqual(result, [[0.0] * self.NUM_TRUE] * self.BATCH_SIZE)\n self.assertEqual(true_expected_count.get_shape(),\n [self.BATCH_SIZE, self.NUM_TRUE])\n self.assertEqual(true_log_expected_count.get_shape(),\n [self.BATCH_SIZE, self.NUM_TRUE])\n\n def testSampledLogExpectedCount(self):\n with self.cached_session():\n true_classes = constant_op.constant(\n [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)\n _, _, sampled_expected_count = candidate_sampling_ops.all_candidate_sampler( # pylint: disable=line-too-long\n true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)\n sampled_log_expected_count = math_ops.log(sampled_expected_count)\n result = self.evaluate(sampled_log_expected_count)\n\n self.assertAllEqual(result, [0.0] * self.NUM_SAMPLED)\n self.assertEqual(sampled_expected_count.get_shape(), [self.NUM_SAMPLED])\n 
self.assertEqual(sampled_log_expected_count.get_shape(), [self.NUM_SAMPLED])\n\n def testAccidentalHits(self):\n with self.cached_session() as sess:\n true_classes = constant_op.constant(\n [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)\n sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(\n true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)\n accidental_hits = candidate_sampling_ops.compute_accidental_hits(\n true_classes, sampled_candidates, self.NUM_TRUE)\n indices, ids, weights = self.evaluate(accidental_hits)\n\n self.assertEqual(1, accidental_hits[0].get_shape().ndims)\n self.assertEqual(1, accidental_hits[1].get_shape().ndims)\n self.assertEqual(1, accidental_hits[2].get_shape().ndims)\n for index, id_, weight in zip(indices, ids, weights):\n self.assertTrue(id_ in self.TRUE_LABELS[index])\n self.assertLess(weight, -1.0e37)\n\n def testSeed(self):\n\n def draw(seed):\n with self.cached_session():\n true_classes = constant_op.constant(\n [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)\n sampled, _, _ = candidate_sampling_ops.log_uniform_candidate_sampler(\n true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True, 5, seed=seed)\n return self.evaluate(sampled)\n\n # Non-zero seed. Repeatable.\n for seed in [1, 12, 123, 1234]:\n self.assertAllEqual(draw(seed), draw(seed))\n # Seed=0 means random seeds.\n num_same = 0\n for _ in range(10):\n if np.allclose(draw(None), draw(None)):\n num_same += 1\n # Accounts for the fact that the same random seed may be picked\n # twice very rarely.\n self.assertLessEqual(num_same, 2)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
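A hedged usage sketch of the sampler family tested above, via the public TF 2.x endpoint; the values mirror TRUE_LABELS from the test, but the call site itself is illustrative, not taken from the file.

import tensorflow as tf

true_classes = tf.constant([[1, 2], [0, 4], [3, 3]], dtype=tf.int64)
sampled, true_expected, sampled_expected = (
    tf.random.log_uniform_candidate_sampler(
        true_classes, num_true=2, num_sampled=5, unique=True, range_max=5))
# `sampled` holds candidate class ids in [0, range_max); the two
# expected-count outputs supply the correction terms consumed by
# sampled-softmax / NCE losses.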
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for ragged.where.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import ragged\nfrom tensorflow.python.platform import googletest\n\n\nclass RaggedWhereOpTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n @parameterized.parameters([\n #=========================================================================\n # Docstring Examples\n #=========================================================================\n dict( # shape=[D1, (D2)]\n condition=ragged.constant_value([[True, False, True], [False, True]]),\n expected=[[0, 0], [0, 2], [1, 1]]),\n dict( # shape=[D1, (D2)]\n condition=ragged.constant_value([[True, False, True], [False, True]]),\n x=ragged.constant_value([['A', 'B', 'C'], ['D', 'E']]),\n y=ragged.constant_value([['a', 'b', 'c'], ['d', 'e']]),\n expected=ragged.constant_value([[b'A', b'b', b'C'], [b'd', b'E']])),\n dict( # shape=[D1, (D2)]\n condition=ragged.constant_value([True, False]),\n x=ragged.constant_value([['A', 'B', 'C'], ['D', 'E']]),\n y=ragged.constant_value([['a', 'b', 'c'], ['d', 'e']]),\n expected=ragged.constant_value([[b'A', b'B', b'C'], [b'd', b'e']])),\n #=========================================================================\n # Coordinate-retrieval mode\n #=========================================================================\n dict( # shape=[D1]\n condition=[True, False, True, False, True],\n expected=[[0], [2], [4]]),\n dict( # shape=[D1, D2]\n condition=[[True, False], [False, True]],\n expected=[[0, 0], [1, 1]]),\n dict( # shape=[D1, (D2)]\n condition=ragged.constant_value([[True, False, True], [False, True]]),\n expected=[[0, 0], [0, 2], [1, 1]]),\n dict( # shape=[D1, (D2), (D3)]\n condition=ragged.constant_value([\n [[True, False, True], [False, True]],\n [[True], [], [False], [False, True, False]]\n ]),\n expected=[[0, 0, 0], [0, 0, 2], [0, 1, 1],\n [1, 0, 0], [1, 3, 1]]),\n dict( # shape=[D1, (D2), D3]\n condition=ragged.constant_value([\n [[True, False], [False, True]],\n [[True, False], [False, False], [True, False], [False, True]]\n ], ragged_rank=1),\n expected=[[0, 0, 0], [0, 1, 1],\n [1, 0, 0], [1, 2, 0], [1, 3, 1]]),\n dict( # shape=[D1, (D2), (D3), (D4)]\n condition=ragged.constant_value([\n [[[], [True]]],\n [[[True, False, True], [False, True]],\n [[True], [], [False], [False, True, False]]]\n ]),\n expected=[[0, 0, 1, 0],\n [1, 0, 0, 0], [1, 0, 0, 2], [1, 0, 1, 1],\n [1, 1, 0, 0], [1, 1, 3, 1]]),\n\n #=========================================================================\n # Elementwise value-selection mode\n #=========================================================================\n dict( # shape=[]\n condition=True, 
x='A', y='a', expected=b'A'),\n dict( # shape=[]\n condition=False, x='A', y='a', expected=b'a'),\n dict( # shape=[D1]\n condition=[True, False, True],\n x=['A', 'B', 'C'],\n y=['a', 'b', 'c'],\n expected=[b'A', b'b', b'C']),\n dict( # shape=[D1, D2]\n condition=[[True, False], [False, True]],\n x=[['A', 'B'], ['D', 'E']],\n y=[['a', 'b'], ['d', 'e']],\n expected=[[b'A', b'b'], [b'd', b'E']]),\n dict( # shape=[D1, (D2)]\n condition=ragged.constant_value([[True, False, True], [False, True]]),\n x=ragged.constant_value([['A', 'B', 'C'], ['D', 'E']]),\n y=ragged.constant_value([['a', 'b', 'c'], ['d', 'e']]),\n expected=ragged.constant_value([[b'A', b'b', b'C'], [b'd', b'E']])),\n dict( # shape=[D1, (D2), D3]\n condition=ragged.constant_value([\n [[True, False], [False, True]],\n [[True, False], [False, False], [True, False], [False, True]]\n ], ragged_rank=1),\n x=ragged.constant_value([\n [['A', 'B'], ['C', 'D']],\n [['E', 'F'], ['G', 'H'], ['I', 'J'], ['K', 'L']]\n ], ragged_rank=1),\n y=ragged.constant_value([\n [['a', 'b'], ['c', 'd']],\n [['e', 'f'], ['g', 'h'], ['i', 'j'], ['k', 'l']]\n ], ragged_rank=1),\n expected=ragged.constant_value([\n [[b'A', b'b'], [b'c', b'D']],\n [[b'E', b'f'], [b'g', b'h'], [b'I', b'j'], [b'k', b'L']]\n ], ragged_rank=1)),\n dict( # shape=[D1, (D2), (D3), (D4)]\n condition=ragged.constant_value([\n [[[], [True]]],\n [[[True, False, True], [False, True]],\n [[True], [], [False], [False, True, False]]]\n ]),\n x=ragged.constant_value([\n [[[], ['A']]],\n [[['B', 'C', 'D'], ['E', 'F']],\n [['G'], [], ['H'], ['I', 'J', 'K']]]\n ]),\n y=ragged.constant_value([\n [[[], ['a']]],\n [[['b', 'c', 'd'], ['e', 'f']],\n [['g'], [], ['h'], ['i', 'j', 'k']]]\n ]),\n expected=ragged.constant_value([\n [[[], [b'A']]],\n [[[b'B', b'c', b'D'], [b'e', b'F']],\n [[b'G'], [], [b'h'], [b'i', b'J', b'k']]]\n ])),\n\n #=========================================================================\n # Elementwise row-selection mode\n #=========================================================================\n dict( # shape=[D1, D2]\n condition=[True, False, True],\n x=[['A', 'B'], ['C', 'D'], ['E', 'F']],\n y=[['a', 'b'], ['c', 'd'], ['e', 'f']],\n expected=[[b'A', b'B'], [b'c', b'd'], [b'E', b'F']]),\n dict( # shape=[D1, (D2)]\n condition=[True, False, True],\n x=ragged.constant_value([['A', 'B', 'C'], ['D', 'E'], ['F', 'G']]),\n y=ragged.constant_value([['a', 'b'], ['c'], ['d', 'e']]),\n expected=ragged.constant_value([[b'A', b'B', b'C'], [b'c'],\n [b'F', b'G']])),\n dict( # shape=[D1, (D2), (D3), (D4)]\n condition=ragged.constant_value([True, False]),\n x=ragged.constant_value([\n [[[], ['A']]],\n [[['B', 'C', 'D'], ['E', 'F']],\n [['G'], [], ['H'], ['I', 'J', 'K']]]\n ]),\n y=ragged.constant_value([[[['a']]], [[['b']]]]),\n expected=ragged.constant_value([[[[], [b'A']]], [[[b'b']]]])),\n ]) # pyformat: disable\n def testRaggedWhere(self, condition, expected, x=None, y=None):\n result = ragged.where(condition, x, y)\n self.assertEqual(\n getattr(result, 'ragged_rank', 0), getattr(expected, 'ragged_rank', 0))\n with self.test_session():\n result_value = self.evaluate(result)\n if hasattr(result_value, 'tolist'):\n result_value = result_value.tolist()\n if hasattr(expected, 'tolist'):\n expected = expected.tolist()\n self.assertEqual(result_value, expected)\n\n @parameterized.parameters([\n dict(\n condition=[True, False],\n x=[1, 2],\n error=ValueError,\n message='x and y must be either both None or both non-None'),\n dict(\n condition=ragged.constant_value([[True, False, True], [False, 
True]]),\n x=ragged.constant_value([['A', 'B', 'C'], ['D', 'E']]),\n y=[['a', 'b'], ['d', 'e']],\n error=ValueError,\n message='Input shapes do not match.'),\n ])\n def testRaggedWhereErrors(self, condition, error, message, x=None, y=None):\n with self.assertRaisesRegexp(error, message):\n ragged.where(condition, x, y)\n\n\nif __name__ == '__main__':\n googletest.main()\n",
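A pure-Python illustration of the coordinate-retrieval mode exercised above: emit the index path of every True in a nested (possibly ragged) boolean structure. where_coords is a hypothetical helper sketching the semantics, not the op under test.

def where_coords(condition, prefix=()):
    """Return the index path of every True in a nested boolean structure."""
    coords = []
    for i, value in enumerate(condition):
        if isinstance(value, list):
            # Descend one (possibly ragged) dimension.
            coords.extend(where_coords(value, prefix + (i,)))
        elif value:
            coords.append(list(prefix + (i,)))
    return coords

# Matches the docstring example in the parameters above.
assert where_coords([[True, False, True], [False, True]]) == \
    [[0, 0], [0, 2], [1, 1]]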
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implementation of tf.metrics module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import confusion_matrix\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import sets\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import weights_broadcast_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import distribution_strategy_context\nfrom tensorflow.python.util.deprecation import deprecated\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef metric_variable(shape, dtype, validate_shape=True, name=None):\n \"\"\"Create variable in `GraphKeys.(LOCAL|METRIC_VARIABLES)` collections.\n\n If running in a `DistributionStrategy` context, the variable will be\n \"replica local\". This means:\n\n * The returned object will be a container with separate variables\n per replica of the model.\n\n * When writing to the variable, e.g. using `assign_add` in a metric\n update, the update will be applied to the variable local to the\n replica.\n\n * To get a metric's result value, we need to sum the variable values\n across the replicas before computing the final answer. Furthermore,\n the final answer should be computed once instead of in every\n replica. 
Both of these are accomplished by running the computation\n of the final result value inside\n `distribution_strategy_context.get_replica_context().merge_call(fn)`.\n Inside the `merge_call()`, ops are only added to the graph once\n and access to a replica-local variable in a computation returns\n the sum across all replicas.\n\n Args:\n shape: Shape of the created variable.\n dtype: Type of the created variable.\n validate_shape: (Optional) Whether shape validation is enabled for\n the created variable.\n name: (Optional) String name of the created variable.\n\n Returns:\n A (non-trainable) variable initialized to zero, or if inside a\n `DistributionStrategy` scope a replica-local variable container.\n \"\"\"\n # Note that synchronization \"ON_READ\" implies trainable=False.\n return variable_scope.variable(\n lambda: array_ops.zeros(shape, dtype),\n collections=[\n ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES\n ],\n validate_shape=validate_shape,\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.SUM,\n name=name)\n\n\ndef _remove_squeezable_dimensions(predictions, labels, weights):\n \"\"\"Squeeze or expand last dim if needed.\n\n Squeezes last dim of `predictions` or `labels` if their rank differs by 1\n (using confusion_matrix.remove_squeezable_dimensions).\n Squeezes or expands last dim of `weights` if its rank differs by 1 from the\n new rank of `predictions`.\n\n If `weights` is scalar, it is kept scalar.\n\n This will use static shape if available. Otherwise, it will add graph\n operations, which could result in a performance hit.\n\n Args:\n predictions: Predicted values, a `Tensor` of arbitrary dimensions.\n labels: Optional label `Tensor` whose dimensions match `predictions`.\n weights: Optional weight scalar or `Tensor` whose dimensions match\n `predictions`.\n\n Returns:\n Tuple of `predictions`, `labels` and `weights`. 
Each of them possibly has\n the last dimension squeezed, `weights` could be extended by one dimension.\n \"\"\"\n predictions = ops.convert_to_tensor(predictions)\n if labels is not None:\n labels, predictions = confusion_matrix.remove_squeezable_dimensions(\n labels, predictions)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n if weights is None:\n return predictions, labels, None\n\n weights = ops.convert_to_tensor(weights)\n weights_shape = weights.get_shape()\n weights_rank = weights_shape.ndims\n if weights_rank == 0:\n return predictions, labels, weights\n\n predictions_shape = predictions.get_shape()\n predictions_rank = predictions_shape.ndims\n if (predictions_rank is not None) and (weights_rank is not None):\n # Use static rank.\n if weights_rank - predictions_rank == 1:\n weights = array_ops.squeeze(weights, [-1])\n elif predictions_rank - weights_rank == 1:\n weights = array_ops.expand_dims(weights, [-1])\n else:\n # Use dynamic rank.\n weights_rank_tensor = array_ops.rank(weights)\n rank_diff = weights_rank_tensor - array_ops.rank(predictions)\n\n def _maybe_expand_weights():\n return control_flow_ops.cond(\n math_ops.equal(rank_diff, -1),\n lambda: array_ops.expand_dims(weights, [-1]), lambda: weights)\n\n # Don't attempt squeeze if it will fail based on static check.\n if ((weights_rank is not None) and\n (not weights_shape.dims[-1].is_compatible_with(1))):\n maybe_squeeze_weights = lambda: weights\n else:\n maybe_squeeze_weights = lambda: array_ops.squeeze(weights, [-1])\n\n def _maybe_adjust_weights():\n return control_flow_ops.cond(\n math_ops.equal(rank_diff, 1), maybe_squeeze_weights,\n _maybe_expand_weights)\n\n # If weights are scalar, do nothing. Otherwise, try to add or remove a\n # dimension to match predictions.\n weights = control_flow_ops.cond(\n math_ops.equal(weights_rank_tensor, 0), lambda: weights,\n _maybe_adjust_weights)\n return predictions, labels, weights\n\n\ndef _maybe_expand_labels(labels, predictions):\n \"\"\"If necessary, expand `labels` along last dimension to match `predictions`.\n\n Args:\n labels: `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels] or [D1, ... DN]. The latter implies\n num_labels=1, in which case the result is an expanded `labels` with shape\n [D1, ... DN, 1].\n predictions: `Tensor` with shape [D1, ... DN, num_classes].\n\n Returns:\n `labels` with the same rank as `predictions`.\n\n Raises:\n ValueError: if `labels` has invalid shape.\n \"\"\"\n with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope:\n labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)\n\n # If sparse, expand sparse shape.\n if isinstance(labels, sparse_tensor.SparseTensor):\n return control_flow_ops.cond(\n math_ops.equal(\n array_ops.rank(predictions),\n array_ops.size(labels.dense_shape) + 1),\n lambda: sparse_ops.sparse_reshape( # pylint: disable=g-long-lambda\n labels,\n shape=array_ops.concat((labels.dense_shape, (1,)), 0),\n name=scope),\n lambda: labels)\n\n # Otherwise, try to use static shape.\n labels_rank = labels.get_shape().ndims\n if labels_rank is not None:\n predictions_rank = predictions.get_shape().ndims\n if predictions_rank is not None:\n if predictions_rank == labels_rank:\n return labels\n if predictions_rank == labels_rank + 1:\n return array_ops.expand_dims(labels, -1, name=scope)\n raise ValueError(\n 'Unexpected labels shape %s for predictions shape %s.' 
%\n (labels.get_shape(), predictions.get_shape()))\n\n # Otherwise, use dynamic shape.\n return control_flow_ops.cond(\n math_ops.equal(array_ops.rank(predictions),\n array_ops.rank(labels) + 1),\n lambda: array_ops.expand_dims(labels, -1, name=scope), lambda: labels)\n\n\ndef _safe_scalar_div(numerator, denominator, name):\n \"\"\"Divides two values, returning 0 if the denominator is 0.\n\n Args:\n numerator: A scalar `float64` `Tensor`.\n denominator: A scalar `float64` `Tensor`.\n name: Name for the returned op.\n\n Returns:\n 0 if `denominator` == 0, else `numerator` / `denominator`\n \"\"\"\n numerator.get_shape().with_rank_at_most(1)\n denominator.get_shape().with_rank_at_most(1)\n return math_ops.div_no_nan(numerator, denominator, name=name)\n\n\ndef _streaming_confusion_matrix(labels, predictions, num_classes, weights=None):\n \"\"\"Calculate a streaming confusion matrix.\n\n Calculates a confusion matrix. For estimation over a stream of data,\n the function creates an `update_op` operation.\n\n Args:\n labels: A `Tensor` of ground truth labels with shape [batch size] and of\n type `int32` or `int64`. The tensor will be flattened if its rank > 1.\n predictions: A `Tensor` of prediction results for semantic labels, whose\n shape is [batch size] and type `int32` or `int64`. The tensor will be\n flattened if its rank > 1.\n num_classes: The possible number of labels the prediction task can\n have. This value must be provided, since a confusion matrix of\n dimension = [num_classes, num_classes] will be allocated.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n\n Returns:\n total_cm: A `Tensor` representing the confusion matrix.\n update_op: An operation that increments the confusion matrix.\n \"\"\"\n # Local variable to accumulate the predictions in the confusion matrix.\n total_cm = metric_variable(\n [num_classes, num_classes], dtypes.float64, name='total_confusion_matrix')\n\n # Cast the type to int64 required by confusion_matrix_ops.\n predictions = math_ops.to_int64(predictions)\n labels = math_ops.to_int64(labels)\n num_classes = math_ops.to_int64(num_classes)\n\n # Flatten the input if its rank > 1.\n if predictions.get_shape().ndims > 1:\n predictions = array_ops.reshape(predictions, [-1])\n\n if labels.get_shape().ndims > 1:\n labels = array_ops.reshape(labels, [-1])\n\n if (weights is not None) and (weights.get_shape().ndims > 1):\n weights = array_ops.reshape(weights, [-1])\n\n # Accumulate the prediction to current confusion matrix.\n current_cm = confusion_matrix.confusion_matrix(\n labels, predictions, num_classes, weights=weights, dtype=dtypes.float64)\n update_op = state_ops.assign_add(total_cm, current_cm)\n return total_cm, update_op\n\n\ndef _aggregate_across_replicas(metrics_collections, metric_value_fn, *args):\n \"\"\"Aggregate metric value across replicas.\"\"\"\n def fn(distribution, *a):\n \"\"\"Call `metric_value_fn` in the correct control flow context.\"\"\"\n if hasattr(distribution.extended, '_outer_control_flow_context'):\n # If there was an outer context captured before this method was called,\n # then we enter that context to create the metric value op. If the\n # caputred context is `None`, ops.control_dependencies(None) gives the\n # desired behavior. 
Else we use `Enter` and `Exit` to enter and exit the\n # captured context.\n # This special handling is needed because sometimes the metric is created\n # inside a while_loop (and perhaps a TPU rewrite context). But we don't\n # want the value op to be evaluated every step or on the TPU. So we\n # create it outside so that it can be evaluated at the end on the host,\n # once the update ops have been evaluted.\n\n # pylint: disable=protected-access\n if distribution.extended._outer_control_flow_context is None:\n with ops.control_dependencies(None):\n metric_value = metric_value_fn(distribution, *a)\n else:\n distribution.extended._outer_control_flow_context.Enter()\n metric_value = metric_value_fn(distribution, *a)\n distribution.extended._outer_control_flow_context.Exit()\n # pylint: enable=protected-access\n else:\n metric_value = metric_value_fn(distribution, *a)\n if metrics_collections:\n ops.add_to_collections(metrics_collections, metric_value)\n return metric_value\n\n return distribution_strategy_context.get_replica_context().merge_call(\n fn, args=args)\n\n\n@tf_export('metrics.mean')\ndef mean(values,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the (weighted) mean of the given values.\n\n The `mean` function creates two local variables, `total` and `count`\n that are used to compute the average of `values`. This average is ultimately\n returned as `mean` which is an idempotent operation that simply divides\n `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `mean`.\n `update_op` increments `total` with the reduced sum of the product of `values`\n and `weights`, and it increments `count` with the reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n values: A `Tensor` of arbitrary dimensions.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `values`, and must be broadcastable to `values` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `values` dimension).\n metrics_collections: An optional list of collections that `mean`\n should be added to.\n updates_collections: An optional list of collections that `update_op`\n should be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean: A `Tensor` representing the current mean, the value of `total` divided\n by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_value`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.mean is not supported when eager execution '\n 'is enabled.')\n\n with variable_scope.variable_scope(name, 'mean', (values, weights)):\n values = math_ops.to_float(values)\n\n total = metric_variable([], dtypes.float32, name='total')\n count = metric_variable([], dtypes.float32, name='count')\n\n if weights is None:\n num_values = math_ops.to_float(array_ops.size(values))\n else:\n values, _, weights = _remove_squeezable_dimensions(\n predictions=values, labels=None, weights=weights)\n weights = weights_broadcast_ops.broadcast_weights(\n math_ops.to_float(weights), values)\n values = math_ops.multiply(values, weights)\n num_values = math_ops.reduce_sum(weights)\n\n update_total_op = state_ops.assign_add(total, math_ops.reduce_sum(values))\n with ops.control_dependencies([values]):\n update_count_op = state_ops.assign_add(count, num_values)\n\n def compute_mean(_, t, c):\n return math_ops.div_no_nan(t, math_ops.maximum(c, 0), name='value')\n\n mean_t = _aggregate_across_replicas(\n metrics_collections, compute_mean, total, count)\n update_op = math_ops.div_no_nan(\n update_total_op, math_ops.maximum(update_count_op, 0), name='update_op')\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean_t, update_op\n\n\n@tf_export('metrics.accuracy')\ndef accuracy(labels,\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Calculates how often `predictions` matches `labels`.\n\n The `accuracy` function creates two local variables, `total` and\n `count` that are used to compute the frequency with which `predictions`\n matches `labels`. This frequency is ultimately returned as `accuracy`: an\n idempotent operation that simply divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `accuracy`.\n Internally, an `is_correct` operation computes a `Tensor` with elements 1.0\n where the corresponding elements of `predictions` and `labels` match and 0.0\n otherwise. Then `update_op` increments `total` with the reduced sum of the\n product of `weights` and `is_correct`, and it increments `count` with the\n reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n labels: The ground truth values, a `Tensor` whose shape matches\n `predictions`.\n predictions: The predicted values, a `Tensor` of any shape.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `accuracy` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n accuracy: A `Tensor` representing the accuracy, the value of `total` divided\n by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `accuracy`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.accuracy is not supported when eager '\n 'execution is enabled.')\n\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=predictions, labels=labels, weights=weights)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n if labels.dtype != predictions.dtype:\n predictions = math_ops.cast(predictions, labels.dtype)\n is_correct = math_ops.to_float(math_ops.equal(predictions, labels))\n return mean(is_correct, weights, metrics_collections, updates_collections,\n name or 'accuracy')\n\n\ndef _confusion_matrix_at_thresholds(labels,\n predictions,\n thresholds,\n weights=None,\n includes=None):\n \"\"\"Computes true_positives, false_negatives, true_negatives, false_positives.\n\n This function creates up to four local variables, `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives`.\n `true_positive[i]` is defined as the total weight of values in `predictions`\n above `thresholds[i]` whose corresponding entry in `labels` is `True`.\n `false_negatives[i]` is defined as the total weight of values in `predictions`\n at most `thresholds[i]` whose corresponding entry in `labels` is `True`.\n `true_negatives[i]` is defined as the total weight of values in `predictions`\n at most `thresholds[i]` whose corresponding entry in `labels` is `False`.\n `false_positives[i]` is defined as the total weight of values in `predictions`\n above `thresholds[i]` whose corresponding entry in `labels` is `False`.\n\n For estimation of these metrics over a stream of data, for each metric the\n function respectively creates an `update_op` operation that updates the\n variable and returns its value.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` whose shape matches `predictions`. Will be cast to\n `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n includes: Tuple of keys to return, from 'tp', 'fn', 'tn', fp'. 
If `None`,\n default to all four.\n\n Returns:\n values: Dict of variables of shape `[len(thresholds)]`. Keys are from\n `includes`.\n update_ops: Dict of operations that increments the `values`. Keys are from\n `includes`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n `includes` contains invalid keys.\n \"\"\"\n all_includes = ('tp', 'fn', 'tn', 'fp')\n if includes is None:\n includes = all_includes\n else:\n for include in includes:\n if include not in all_includes:\n raise ValueError('Invalid key: %s.' % include)\n\n with ops.control_dependencies([\n check_ops.assert_greater_equal(\n predictions,\n math_ops.cast(0.0, dtype=predictions.dtype),\n message='predictions must be in [0, 1]'),\n check_ops.assert_less_equal(\n predictions,\n math_ops.cast(1.0, dtype=predictions.dtype),\n message='predictions must be in [0, 1]')\n ]):\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=math_ops.to_float(predictions),\n labels=math_ops.cast(labels, dtype=dtypes.bool),\n weights=weights)\n\n num_thresholds = len(thresholds)\n\n # Reshape predictions and labels.\n predictions_2d = array_ops.reshape(predictions, [-1, 1])\n labels_2d = array_ops.reshape(\n math_ops.cast(labels, dtype=dtypes.bool), [1, -1])\n\n # Use static shape if known.\n num_predictions = predictions_2d.get_shape().as_list()[0]\n\n # Otherwise use dynamic shape.\n if num_predictions is None:\n num_predictions = array_ops.shape(predictions_2d)[0]\n thresh_tiled = array_ops.tile(\n array_ops.expand_dims(array_ops.constant(thresholds), [1]),\n array_ops.stack([1, num_predictions]))\n\n # Tile the predictions after thresholding them across different thresholds.\n pred_is_pos = math_ops.greater(\n array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),\n thresh_tiled)\n if ('fn' in includes) or ('tn' in includes):\n pred_is_neg = math_ops.logical_not(pred_is_pos)\n\n # Tile labels by number of thresholds\n label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])\n if ('fp' in includes) or ('tn' in includes):\n label_is_neg = math_ops.logical_not(label_is_pos)\n\n if weights is not None:\n weights = weights_broadcast_ops.broadcast_weights(\n math_ops.to_float(weights), predictions)\n weights_tiled = array_ops.tile(\n array_ops.reshape(weights, [1, -1]), [num_thresholds, 1])\n thresh_tiled.get_shape().assert_is_compatible_with(\n weights_tiled.get_shape())\n else:\n weights_tiled = None\n\n values = {}\n update_ops = {}\n\n if 'tp' in includes:\n true_p = metric_variable(\n [num_thresholds], dtypes.float32, name='true_positives')\n is_true_positive = math_ops.to_float(\n math_ops.logical_and(label_is_pos, pred_is_pos))\n if weights_tiled is not None:\n is_true_positive *= weights_tiled\n update_ops['tp'] = state_ops.assign_add(true_p,\n math_ops.reduce_sum(\n is_true_positive, 1))\n values['tp'] = true_p\n\n if 'fn' in includes:\n false_n = metric_variable(\n [num_thresholds], dtypes.float32, name='false_negatives')\n is_false_negative = math_ops.to_float(\n math_ops.logical_and(label_is_pos, pred_is_neg))\n if weights_tiled is not None:\n is_false_negative *= weights_tiled\n update_ops['fn'] = state_ops.assign_add(false_n,\n math_ops.reduce_sum(\n is_false_negative, 1))\n values['fn'] = false_n\n\n if 'tn' in includes:\n true_n = metric_variable(\n [num_thresholds], dtypes.float32, name='true_negatives')\n is_true_negative = math_ops.to_float(\n 
math_ops.logical_and(label_is_neg, pred_is_neg))\n if weights_tiled is not None:\n is_true_negative *= weights_tiled\n update_ops['tn'] = state_ops.assign_add(true_n,\n math_ops.reduce_sum(\n is_true_negative, 1))\n values['tn'] = true_n\n\n if 'fp' in includes:\n false_p = metric_variable(\n [num_thresholds], dtypes.float32, name='false_positives')\n is_false_positive = math_ops.to_float(\n math_ops.logical_and(label_is_neg, pred_is_pos))\n if weights_tiled is not None:\n is_false_positive *= weights_tiled\n update_ops['fp'] = state_ops.assign_add(false_p,\n math_ops.reduce_sum(\n is_false_positive, 1))\n values['fp'] = false_p\n\n return values, update_ops\n\n\ndef _aggregate_variable(v, collections):\n f = lambda distribution, value: distribution.read_var(value)\n return _aggregate_across_replicas(collections, f, v)\n\n\n@tf_export('metrics.auc')\ndef auc(labels,\n predictions,\n weights=None,\n num_thresholds=200,\n metrics_collections=None,\n updates_collections=None,\n curve='ROC',\n name=None,\n summation_method='trapezoidal'):\n \"\"\"Computes the approximate AUC via a Riemann sum.\n\n The `auc` function creates four local variables, `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` that are used to\n compute the AUC. To discretize the AUC curve, a linearly spaced set of\n thresholds is used to compute pairs of recall and precision values. The area\n under the ROC-curve is therefore computed using the height of the recall\n values by the false positive rate, while the area under the PR-curve is the\n computed using the height of the precision values by the recall.\n\n This value is ultimately returned as `auc`, an idempotent operation that\n computes the area under a discretized curve of precision versus recall values\n (computed using the aforementioned variables). The `num_thresholds` variable\n controls the degree of discretization with larger numbers of thresholds more\n closely approximating the true AUC. The quality of the approximation may vary\n dramatically depending on `num_thresholds`.\n\n For best results, `predictions` should be distributed approximately uniformly\n in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC\n approximation may be poor if this is not the case. Setting `summation_method`\n to 'minoring' or 'majoring' can help quantify the error in the approximation\n by providing lower or upper bound estimate of the AUC.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `auc`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` whose shape matches `predictions`. 
Will be cast to\n `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n num_thresholds: The number of thresholds to use when discretizing the roc\n curve.\n metrics_collections: An optional list of collections that `auc` should be\n added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n curve: Specifies the name of the curve to be computed, 'ROC' [default] or\n 'PR' for the Precision-Recall-curve.\n name: An optional variable_scope name.\n summation_method: Specifies the Riemann summation method used\n (https://en.wikipedia.org/wiki/Riemann_sum): 'trapezoidal' [default] that\n applies the trapezoidal rule; 'careful_interpolation', a variant of it\n differing only by a more correct interpolation scheme for PR-AUC -\n interpolating (true/false) positives but not the ratio that is precision;\n 'minoring' that applies left summation for increasing intervals and right\n summation for decreasing intervals; 'majoring' that does the opposite.\n Note that 'careful_interpolation' is strictly preferred to 'trapezoidal'\n (to be deprecated soon) as it applies the same method for ROC, and a\n better one (see Davis & Goadrich 2006 for details) for the PR curve.\n\n Returns:\n auc: A scalar `Tensor` representing the current area-under-curve.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables\n appropriately and whose value matches `auc`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.auc is not supported when eager execution '\n 'is enabled.')\n\n with variable_scope.variable_scope(name, 'auc',\n (labels, predictions, weights)):\n if curve != 'ROC' and curve != 'PR':\n raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))\n kepsilon = 1e-7 # to account for floating point imprecisions\n thresholds = [\n (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)\n ]\n thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]\n\n values, update_ops = _confusion_matrix_at_thresholds(\n labels, predictions, thresholds, weights)\n\n # Add epsilons to avoid dividing by 0.\n epsilon = 1.0e-6\n\n def interpolate_pr_auc(tp, fp, fn):\n \"\"\"Interpolation formula inspired by section 4 of Davis & Goadrich 2006.\n\n Note here we derive & use a closed formula not present in the paper\n - as follows:\n Modeling all of TP (true positive weight),\n FP (false positive weight) and their sum P = TP + FP (positive weight)\n as varying linearly within each interval [A, B] between successive\n thresholds, we get\n Precision = (TP_A + slope * (P - P_A)) / P\n with slope = dTP / dP = (TP_B - TP_A) / (P_B - P_A).\n The area within the interval is thus (slope / total_pos_weight) times\n int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}\n int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}\n where intercept = TP_A - slope * P_A = TP_B - slope 
* P_B, resulting in\n int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)\n Bringing back the factor (slope / total_pos_weight) we'd put aside, we get\n slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight\n where dTP == TP_B - TP_A.\n Note that when P_A == 0 the above calculation simplifies into\n int_A^B{Precision.dTP} = int_A^B{slope * dTP} = slope * (TP_B - TP_A)\n which is really equivalent to imputing constant precision throughout the\n first bucket having >0 true positives.\n\n Args:\n tp: true positive counts\n fp: false positive counts\n fn: false negative counts\n Returns:\n pr_auc: an approximation of the area under the P-R curve.\n \"\"\"\n dtp = tp[:num_thresholds - 1] - tp[1:]\n p = tp + fp\n prec_slope = math_ops.div_no_nan(\n dtp,\n math_ops.maximum(p[:num_thresholds - 1] - p[1:], 0),\n name='prec_slope')\n intercept = tp[1:] - math_ops.multiply(prec_slope, p[1:])\n safe_p_ratio = array_ops.where(\n math_ops.logical_and(p[:num_thresholds - 1] > 0, p[1:] > 0),\n math_ops.div_no_nan(\n p[:num_thresholds - 1],\n math_ops.maximum(p[1:], 0),\n name='recall_relative_ratio'), array_ops.ones_like(p[1:]))\n return math_ops.reduce_sum(\n math_ops.div_no_nan(\n prec_slope * (dtp + intercept * math_ops.log(safe_p_ratio)),\n math_ops.maximum(tp[1:] + fn[1:], 0),\n name='pr_auc_increment'),\n name='interpolate_pr_auc')\n\n def compute_auc(tp, fn, tn, fp, name):\n \"\"\"Computes the roc-auc or pr-auc based on confusion counts.\"\"\"\n if curve == 'PR':\n if summation_method == 'trapezoidal':\n logging.warning(\n 'Trapezoidal rule is known to produce incorrect PR-AUCs; '\n 'please switch to \"careful_interpolation\" instead.')\n elif summation_method == 'careful_interpolation':\n # This one is a bit tricky and is handled separately.\n return interpolate_pr_auc(tp, fp, fn)\n rec = math_ops.div(tp + epsilon, tp + fn + epsilon)\n if curve == 'ROC':\n fp_rate = math_ops.div(fp, fp + tn + epsilon)\n x = fp_rate\n y = rec\n else: # curve == 'PR'.\n prec = math_ops.div(tp + epsilon, tp + fp + epsilon)\n x = rec\n y = prec\n if summation_method in ('trapezoidal', 'careful_interpolation'):\n # Note that the case ('PR', 'careful_interpolation') has been handled\n # above.\n return math_ops.reduce_sum(\n math_ops.multiply(x[:num_thresholds - 1] - x[1:],\n (y[:num_thresholds - 1] + y[1:]) / 2.),\n name=name)\n elif summation_method == 'minoring':\n return math_ops.reduce_sum(\n math_ops.multiply(x[:num_thresholds - 1] - x[1:],\n math_ops.minimum(y[:num_thresholds - 1], y[1:])),\n name=name)\n elif summation_method == 'majoring':\n return math_ops.reduce_sum(\n math_ops.multiply(x[:num_thresholds - 1] - x[1:],\n math_ops.maximum(y[:num_thresholds - 1], y[1:])),\n name=name)\n else:\n raise ValueError('Invalid summation_method: %s' % summation_method)\n\n # sum up the areas of all the trapeziums\n def compute_auc_value(_, values):\n return compute_auc(values['tp'], values['fn'], values['tn'], values['fp'],\n 'value')\n\n auc_value = _aggregate_across_replicas(\n metrics_collections, compute_auc_value, values)\n update_op = compute_auc(update_ops['tp'], update_ops['fn'],\n update_ops['tn'], update_ops['fp'], 'update_op')\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return auc_value, update_op\n\n\n@tf_export('metrics.mean_absolute_error')\ndef mean_absolute_error(labels,\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the mean absolute error between the labels and 
predictions.\n\n The `mean_absolute_error` function creates two local variables,\n `total` and `count` that are used to compute the mean absolute error. This\n average is weighted by `weights`, and it is ultimately returned as\n `mean_absolute_error`: an idempotent operation that simply divides `total` by\n `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `mean_absolute_error`. Internally, an `absolute_errors` operation computes the\n absolute value of the differences between `predictions` and `labels`. Then\n `update_op` increments `total` with the reduced sum of the product of\n `weights` and `absolute_errors`, and it increments `count` with the reduced\n sum of `weights`\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` of the same shape as `predictions`.\n predictions: A `Tensor` of arbitrary shape.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that\n `mean_absolute_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_absolute_error: A `Tensor` representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_absolute_error`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.mean_absolute_error is not supported '\n 'when eager execution is enabled.')\n\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=predictions, labels=labels, weights=weights)\n absolute_errors = math_ops.abs(predictions - labels)\n return mean(absolute_errors, weights, metrics_collections,\n updates_collections, name or 'mean_absolute_error')\n\n\n@tf_export('metrics.mean_cosine_distance')\ndef mean_cosine_distance(labels,\n predictions,\n dim,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the cosine distance between the labels and predictions.\n\n The `mean_cosine_distance` function creates two local variables,\n `total` and `count` that are used to compute the average cosine distance\n between `predictions` and `labels`. This average is weighted by `weights`,\n and it is ultimately returned as `mean_distance`, which is an idempotent\n operation that simply divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `mean_distance`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` of arbitrary shape.\n predictions: A `Tensor` of the same shape as `labels`.\n dim: The dimension along which the cosine distance is computed.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension). Also,\n dimension `dim` must be `1`.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_distance: A `Tensor` representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.mean_cosine_distance is not supported when '\n 'eager execution is enabled.')\n\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=predictions, labels=labels, weights=weights)\n radial_diffs = math_ops.multiply(predictions, labels)\n radial_diffs = math_ops.reduce_sum(\n radial_diffs, reduction_indices=[\n dim,\n ], keepdims=True)\n mean_distance, update_op = mean(radial_diffs, weights, None, None, name or\n 'mean_cosine_distance')\n mean_distance = math_ops.subtract(1.0, mean_distance)\n update_op = math_ops.subtract(1.0, update_op)\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, mean_distance)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean_distance, update_op\n\n\n@tf_export('metrics.mean_per_class_accuracy')\ndef mean_per_class_accuracy(labels,\n predictions,\n num_classes,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Calculates the mean of the per-class accuracies.\n\n Calculates the accuracy for each class, then takes the mean of that.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates the accuracy of each class and returns\n them.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` of ground truth labels with shape [batch size] and of\n type `int32` or `int64`. The tensor will be flattened if its rank > 1.\n predictions: A `Tensor` of prediction results for semantic labels, whose\n shape is [batch size] and type `int32` or `int64`. The tensor will be\n flattened if its rank > 1.\n num_classes: The possible number of labels the prediction task can\n have. 
This value must be provided, since two variables with shape =\n [num_classes] will be allocated.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that\n `mean_per_class_accuracy'\n should be added to.\n updates_collections: An optional list of collections `update_op` should be\n added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_accuracy: A `Tensor` representing the mean per class accuracy.\n update_op: An operation that updates the accuracy tensor.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.mean_per_class_accuracy is not supported '\n 'when eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'mean_accuracy',\n (predictions, labels, weights)):\n labels = math_ops.to_int64(labels)\n\n # Flatten the input if its rank > 1.\n if labels.get_shape().ndims > 1:\n labels = array_ops.reshape(labels, [-1])\n\n if predictions.get_shape().ndims > 1:\n predictions = array_ops.reshape(predictions, [-1])\n\n # Check if shape is compatible.\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n total = metric_variable([num_classes], dtypes.float32, name='total')\n count = metric_variable([num_classes], dtypes.float32, name='count')\n\n ones = array_ops.ones([array_ops.size(labels)], dtypes.float32)\n\n if labels.dtype != predictions.dtype:\n predictions = math_ops.cast(predictions, labels.dtype)\n is_correct = math_ops.to_float(math_ops.equal(predictions, labels))\n\n if weights is not None:\n if weights.get_shape().ndims > 1:\n weights = array_ops.reshape(weights, [-1])\n weights = math_ops.to_float(weights)\n\n is_correct *= weights\n ones *= weights\n\n update_total_op = state_ops.scatter_add(total, labels, ones)\n update_count_op = state_ops.scatter_add(count, labels, is_correct)\n\n def compute_mean_accuracy(_, count, total):\n per_class_accuracy = math_ops.div_no_nan(\n count, math_ops.maximum(total, 0), name=None)\n mean_accuracy_v = math_ops.reduce_mean(\n per_class_accuracy, name='mean_accuracy')\n return mean_accuracy_v\n\n mean_accuracy_v = _aggregate_across_replicas(\n metrics_collections, compute_mean_accuracy, count, total)\n\n update_op = math_ops.div_no_nan(\n update_count_op, math_ops.maximum(update_total_op, 0), name='update_op')\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean_accuracy_v, update_op\n\n\n@tf_export('metrics.mean_iou')\ndef mean_iou(labels,\n predictions,\n num_classes,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Calculate per-step mean Intersection-Over-Union (mIOU).\n\n Mean Intersection-Over-Union is a common evaluation metric for\n semantic image segmentation, which first computes the IOU for each\n semantic class and then computes the average over classes.\n IOU is defined as follows:\n IOU = true_positive / (true_positive + false_positive + false_negative).\n The predictions are accumulated in a confusion matrix, weighted by `weights`,\n and mIOU is then calculated 
from it.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `mean_iou`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` of ground truth labels with shape [batch size] and of\n type `int32` or `int64`. The tensor will be flattened if its rank > 1.\n predictions: A `Tensor` of prediction results for semantic labels, whose\n shape is [batch size] and type `int32` or `int64`. The tensor will be\n flattened if its rank > 1.\n num_classes: The possible number of labels the prediction task can\n have. This value must be provided, since a confusion matrix of\n dimension = [num_classes, num_classes] will be allocated.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `mean_iou`\n should be added to.\n updates_collections: An optional list of collections `update_op` should be\n added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_iou: A `Tensor` representing the mean intersection-over-union.\n update_op: An operation that increments the confusion matrix.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.mean_iou is not supported when '\n 'eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'mean_iou',\n (predictions, labels, weights)):\n # Check if shape is compatible.\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n total_cm, update_op = _streaming_confusion_matrix(labels, predictions,\n num_classes, weights)\n\n def compute_mean_iou(_, total_cm):\n \"\"\"Compute the mean intersection-over-union via the confusion matrix.\"\"\"\n sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))\n sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))\n cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))\n denominator = sum_over_row + sum_over_col - cm_diag\n\n # The mean is only computed over classes that appear in the\n # label or prediction tensor. 
If the denominator is 0, we need to\n # ignore the class.\n num_valid_entries = math_ops.reduce_sum(\n math_ops.cast(\n math_ops.not_equal(denominator, 0), dtype=dtypes.float32))\n\n # If the value of the denominator is 0, set it to 1 to avoid\n # zero division.\n denominator = array_ops.where(\n math_ops.greater(denominator, 0), denominator,\n array_ops.ones_like(denominator))\n iou = math_ops.div(cm_diag, denominator)\n\n # If the number of valid entries is 0 (no classes) we return 0.\n result = array_ops.where(\n math_ops.greater(num_valid_entries, 0),\n math_ops.reduce_sum(iou, name='mean_iou') / num_valid_entries, 0)\n return result\n\n # TODO(priyag): Use outside_compilation if in TPU context.\n mean_iou_v = _aggregate_across_replicas(\n metrics_collections, compute_mean_iou, total_cm)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean_iou_v, update_op\n\n\n@tf_export('metrics.mean_relative_error')\ndef mean_relative_error(labels,\n predictions,\n normalizer,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the mean relative error by normalizing with the given values.\n\n The `mean_relative_error` function creates two local variables,\n `total` and `count` that are used to compute the mean relative absolute error.\n This average is weighted by `weights`, and it is ultimately returned as\n `mean_relative_error`: an idempotent operation that simply divides `total` by\n `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `mean_relative_error`. Internally, a `relative_errors` operation divides the\n absolute value of the differences between `predictions` and `labels` by the\n `normalizer`. Then `update_op` increments `total` with the reduced sum of the\n product of `weights` and `relative_errors`, and it increments `count` with the\n reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1.
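# --- Editor's usage sketch (illustrative, not from the original file),
# assuming the public TF 1.x graph-mode API and made-up toy segmentation
# labels with two classes.
def _example_mean_iou():
  import tensorflow as tf
  labels = tf.constant([0, 0, 1, 1], dtype=tf.int64)
  predictions = tf.constant([0, 1, 1, 1], dtype=tf.int64)
  miou, update_op = tf.metrics.mean_iou(labels, predictions, num_classes=2)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)    # accumulates the streaming confusion matrix
    return sess.run(miou)  # IoU(0) = 1/2, IoU(1) = 2/3, mean ~= 0.583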
Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` of the same shape as `predictions`.\n predictions: A `Tensor` of arbitrary shape.\n normalizer: A `Tensor` of the same shape as `predictions`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that\n `mean_relative_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_relative_error: A `Tensor` representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_relative_error`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.mean_relative_error is not supported when '\n 'eager execution is enabled.')\n\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=predictions, labels=labels, weights=weights)\n\n predictions, normalizer = confusion_matrix.remove_squeezable_dimensions(\n predictions, normalizer)\n predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())\n relative_errors = array_ops.where(\n math_ops.equal(normalizer, 0.0), array_ops.zeros_like(labels),\n math_ops.div(math_ops.abs(labels - predictions), normalizer))\n return mean(relative_errors, weights, metrics_collections,\n updates_collections, name or 'mean_relative_error')\n\n\n@tf_export('metrics.mean_squared_error')\ndef mean_squared_error(labels,\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the mean squared error between the labels and predictions.\n\n The `mean_squared_error` function creates two local variables,\n `total` and `count` that are used to compute the mean squared error.\n This average is weighted by `weights`, and it is ultimately returned as\n `mean_squared_error`: an idempotent operation that simply divides `total` by\n `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `mean_squared_error`. Internally, a `squared_error` operation computes the\n element-wise square of the difference between `predictions` and `labels`. Then\n `update_op` increments `total` with the reduced sum of the product of\n `weights` and `squared_error`, and it increments `count` with the reduced sum\n of `weights`.\n\n If `weights` is `None`, weights default to 1. 
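# --- Editor's usage sketch (illustrative, not from the original file). A
# common choice is to normalize by the labels themselves; toy data assumed.
def _example_mean_relative_error():
  import tensorflow as tf
  labels = tf.constant([1., 2., 4.])
  predictions = tf.constant([1.5, 2., 3.])
  mre, update_op = tf.metrics.mean_relative_error(
      labels, predictions, normalizer=labels)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(mre)  # mean(0.5, 0.0, 0.25) = 0.25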
Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` of the same shape as `predictions`.\n predictions: A `Tensor` of arbitrary shape.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that\n `mean_squared_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_squared_error: A `Tensor` representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_squared_error`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.mean_squared_error is not supported when '\n 'eager execution is enabled.')\n\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=predictions, labels=labels, weights=weights)\n squared_error = math_ops.square(labels - predictions)\n return mean(squared_error, weights, metrics_collections, updates_collections,\n name or 'mean_squared_error')\n\n\n@tf_export('metrics.mean_tensor')\ndef mean_tensor(values,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the element-wise (weighted) mean of the given tensors.\n\n In contrast to the `mean` function which returns a scalar with the\n mean, this function returns an average tensor with the same shape as the\n input tensors.\n\n The `mean_tensor` function creates two local variables,\n `total_tensor` and `count_tensor` that are used to compute the average of\n `values`. This average is ultimately returned as `mean` which is an idempotent\n operation that simply divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `mean`.\n `update_op` increments `total` with the reduced sum of the product of `values`\n and `weights`, and it increments `count` with the reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1. 
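# --- Editor's usage sketch (illustrative, not from the original file),
# assuming TF 1.x graph mode and toy regression targets.
def _example_mean_squared_error():
  import tensorflow as tf
  labels = tf.constant([0., 1., 2.])
  predictions = tf.constant([0., 2., 4.])
  mse, update_op = tf.metrics.mean_squared_error(labels, predictions)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(mse)  # (0 + 1 + 4) / 3 ~= 1.667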
Use weights of 0 to mask values.\n\n Args:\n values: A `Tensor` of arbitrary dimensions.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `values`, and must be broadcastable to `values` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `values` dimension).\n metrics_collections: An optional list of collections that `mean`\n should be added to.\n updates_collections: An optional list of collections that `update_op`\n should be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean: A float `Tensor` representing the current mean, the value of `total`\n divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_value`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.mean_tensor is not supported when '\n 'eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'mean', (values, weights)):\n values = math_ops.to_float(values)\n total = metric_variable(\n values.get_shape(), dtypes.float32, name='total_tensor')\n count = metric_variable(\n values.get_shape(), dtypes.float32, name='count_tensor')\n\n num_values = array_ops.ones_like(values)\n if weights is not None:\n values, _, weights = _remove_squeezable_dimensions(\n predictions=values, labels=None, weights=weights)\n weights = weights_broadcast_ops.broadcast_weights(\n math_ops.to_float(weights), values)\n values = math_ops.multiply(values, weights)\n num_values = math_ops.multiply(num_values, weights)\n\n update_total_op = state_ops.assign_add(total, values)\n with ops.control_dependencies([values]):\n update_count_op = state_ops.assign_add(count, num_values)\n\n compute_mean = lambda _, t, c: math_ops.div_no_nan( # pylint: disable=g-long-lambda\n t, math_ops.maximum(c, 0), name='value')\n\n mean_t = _aggregate_across_replicas(\n metrics_collections, compute_mean, total, count)\n\n update_op = math_ops.div_no_nan(\n update_total_op, math_ops.maximum(update_count_op, 0), name='update_op')\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean_t, update_op\n\n\n@tf_export('metrics.percentage_below')\ndef percentage_below(values,\n threshold,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the percentage of values less than the given threshold.\n\n The `percentage_below` function creates two local variables,\n `total` and `count` that are used to compute the percentage of `values` that\n fall below `threshold`. This rate is weighted by `weights`, and it is\n ultimately returned as `percentage` which is an idempotent operation that\n simply divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `percentage`.\n\n If `weights` is `None`, weights default to 1. 
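# --- Editor's usage sketch (illustrative, not from the original file). Unlike
# `mean`, the result keeps the shape of `values`; two updates are streamed
# through a placeholder to show the element-wise running mean. Toy data.
def _example_mean_tensor():
  import tensorflow as tf
  values = tf.placeholder(tf.float32, shape=[2])
  mean_t, update_op = tf.metrics.mean_tensor(values)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op, feed_dict={values: [1., 2.]})
    sess.run(update_op, feed_dict={values: [3., 4.]})
    return sess.run(mean_t)  # [2., 3.], the element-wise mean of both batches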
Use weights of 0 to mask values.\n\n Args:\n values: A numeric `Tensor` of arbitrary size.\n threshold: A scalar threshold.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `values`, and must be broadcastable to `values` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `values` dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n percentage: A `Tensor` representing the current mean, the value of `total`\n divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.percentage_below is not supported when '\n 'eager execution is enabled.')\n\n is_below_threshold = math_ops.to_float(math_ops.less(values, threshold))\n return mean(is_below_threshold, weights, metrics_collections,\n updates_collections, name or 'percentage_below_threshold')\n\n\ndef _count_condition(values,\n weights=None,\n metrics_collections=None,\n updates_collections=None):\n \"\"\"Sums the weights of cases where the given values are True.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n values: A `bool` `Tensor` of arbitrary size.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `values`, and must be broadcastable to `values` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `values` dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n\n Returns:\n value_tensor: A `Tensor` representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n check_ops.assert_type(values, dtypes.bool)\n count = metric_variable([], dtypes.float32, name='count')\n\n values = math_ops.to_float(values)\n if weights is not None:\n with ops.control_dependencies((check_ops.assert_rank_in(\n weights, (0, array_ops.rank(values))),)):\n weights = math_ops.to_float(weights)\n values = math_ops.multiply(values, weights)\n\n value_tensor = _aggregate_variable(count, metrics_collections)\n\n update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return value_tensor, update_op\n\n\n@tf_export('metrics.false_negatives')\ndef false_negatives(labels,\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the total number of false negatives.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. 
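# --- Editor's usage sketch (illustrative, not from the original file),
# assuming TF 1.x graph mode; the values and threshold are made up.
def _example_percentage_below():
  import tensorflow as tf
  values = tf.constant([0.2, 0.5, 0.9, 1.4])
  pct, update_op = tf.metrics.percentage_below(values, threshold=1.0)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(pct)  # 0.75 -- three of the four values fall below 1.0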
Will be cast to `bool`.\n predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will\n be cast to `bool`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n value_tensor: A `Tensor` representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.false_negatives is not supported when '\n 'eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'false_negatives',\n (predictions, labels, weights)):\n\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=math_ops.cast(predictions, dtype=dtypes.bool),\n labels=math_ops.cast(labels, dtype=dtypes.bool),\n weights=weights)\n is_false_negative = math_ops.logical_and(\n math_ops.equal(labels, True), math_ops.equal(predictions, False))\n return _count_condition(is_false_negative, weights, metrics_collections,\n updates_collections)\n\n\n@tf_export('metrics.false_negatives_at_thresholds')\ndef false_negatives_at_thresholds(labels,\n predictions,\n thresholds,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes false negatives at provided threshold values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` whose shape matches `predictions`. 
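# --- Editor's usage sketch (illustrative, not from the original file). The
# same call pattern applies to `false_positives`, `true_negatives` and
# `true_positives` below; toy boolean data assumed.
def _example_false_negatives():
  import tensorflow as tf
  labels = tf.constant([True, True, False, True])
  predictions = tf.constant([True, False, False, False])
  fn, update_op = tf.metrics.false_negatives(labels, predictions)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(fn)  # 2.0 -- label True but predicted False twice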
Will be cast to\n `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `false_negatives`\n should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n false_negatives: A float `Tensor` of shape `[len(thresholds)]`.\n update_op: An operation that updates the `false_negatives` variable and\n returns its current value.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.false_negatives_at_thresholds is not '\n 'supported when eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'false_negatives',\n (predictions, labels, weights)):\n values, update_ops = _confusion_matrix_at_thresholds(\n labels, predictions, thresholds, weights=weights, includes=('fn',))\n\n fn_value = _aggregate_variable(values['fn'], metrics_collections)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_ops['fn'])\n\n return fn_value, update_ops['fn']\n\n\n@tf_export('metrics.false_positives')\ndef false_positives(labels,\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Sum the weights of false positives.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. Will be cast to `bool`.\n predictions: The predicted values, a `Tensor` of arbitrary dimensions. 
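# --- Editor's usage sketch (illustrative, not from the original file). Scores
# above each threshold count as positive predictions; toy data assumed.
def _example_false_negatives_at_thresholds():
  import tensorflow as tf
  labels = tf.constant([True, True, False])
  scores = tf.constant([0.9, 0.6, 0.2])
  fn, update_op = tf.metrics.false_negatives_at_thresholds(
      labels, scores, thresholds=[0.5, 0.8])
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(fn)  # [0., 1.] -- at 0.8 the 0.6-scored positive is missed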
Will\n be cast to `bool`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n value_tensor: A `Tensor` representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.false_positives is not supported when '\n 'eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'false_positives',\n (predictions, labels, weights)):\n\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=math_ops.cast(predictions, dtype=dtypes.bool),\n labels=math_ops.cast(labels, dtype=dtypes.bool),\n weights=weights)\n is_false_positive = math_ops.logical_and(\n math_ops.equal(labels, False), math_ops.equal(predictions, True))\n return _count_condition(is_false_positive, weights, metrics_collections,\n updates_collections)\n\n\n@tf_export('metrics.false_positives_at_thresholds')\ndef false_positives_at_thresholds(labels,\n predictions,\n thresholds,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes false positives at provided threshold values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` whose shape matches `predictions`. 
Will be cast to\n `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `false_positives`\n should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n false_positives: A float `Tensor` of shape `[len(thresholds)]`.\n update_op: An operation that updates the `false_positives` variable and\n returns its current value.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.false_positives_at_thresholds is not '\n 'supported when eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'false_positives',\n (predictions, labels, weights)):\n values, update_ops = _confusion_matrix_at_thresholds(\n labels, predictions, thresholds, weights=weights, includes=('fp',))\n\n fp_value = _aggregate_variable(values['fp'], metrics_collections)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_ops['fp'])\n\n return fp_value, update_ops['fp']\n\n\n@tf_export('metrics.true_negatives')\ndef true_negatives(labels,\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Sum the weights of true_negatives.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. Will be cast to `bool`.\n predictions: The predicted values, a `Tensor` of arbitrary dimensions. 
Will\n be cast to `bool`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n value_tensor: A `Tensor` representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.true_negatives is not '\n 'supported when eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'true_negatives',\n (predictions, labels, weights)):\n\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=math_ops.cast(predictions, dtype=dtypes.bool),\n labels=math_ops.cast(labels, dtype=dtypes.bool),\n weights=weights)\n is_true_negative = math_ops.logical_and(\n math_ops.equal(labels, False), math_ops.equal(predictions, False))\n return _count_condition(is_true_negative, weights, metrics_collections,\n updates_collections)\n\n\n@tf_export('metrics.true_negatives_at_thresholds')\ndef true_negatives_at_thresholds(labels,\n predictions,\n thresholds,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes true negatives at provided threshold values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` whose shape matches `predictions`. 
Will be cast to\n `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `true_negatives`\n should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n true_negatives: A float `Tensor` of shape `[len(thresholds)]`.\n update_op: An operation that updates the `true_negatives` variable and\n returns its current value.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.true_negatives_at_thresholds is not '\n 'supported when eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'true_negatives',\n (predictions, labels, weights)):\n values, update_ops = _confusion_matrix_at_thresholds(\n labels, predictions, thresholds, weights=weights, includes=('tn',))\n\n tn_value = _aggregate_variable(values['tn'], metrics_collections)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_ops['tn'])\n\n return tn_value, update_ops['tn']\n\n\n@tf_export('metrics.true_positives')\ndef true_positives(labels,\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Sum the weights of true_positives.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. Will be cast to `bool`.\n predictions: The predicted values, a `Tensor` of arbitrary dimensions. 
Will\n be cast to `bool`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n value_tensor: A `Tensor` representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.true_positives is not '\n 'supported when eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'true_positives',\n (predictions, labels, weights)):\n\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=math_ops.cast(predictions, dtype=dtypes.bool),\n labels=math_ops.cast(labels, dtype=dtypes.bool),\n weights=weights)\n is_true_positive = math_ops.logical_and(\n math_ops.equal(labels, True), math_ops.equal(predictions, True))\n return _count_condition(is_true_positive, weights, metrics_collections,\n updates_collections)\n\n\n@tf_export('metrics.true_positives_at_thresholds')\ndef true_positives_at_thresholds(labels,\n predictions,\n thresholds,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes true positives at provided threshold values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` whose shape matches `predictions`. 
Will be cast to\n `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `true_positives`\n should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n true_positives: A float `Tensor` of shape `[len(thresholds)]`.\n update_op: An operation that updates the `true_positives` variable and\n returns its current value.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.true_positives_at_thresholds is not '\n 'supported when eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'true_positives',\n (predictions, labels, weights)):\n values, update_ops = _confusion_matrix_at_thresholds(\n labels, predictions, thresholds, weights=weights, includes=('tp',))\n\n tp_value = _aggregate_variable(values['tp'], metrics_collections)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_ops['tp'])\n\n return tp_value, update_ops['tp']\n\n\n@tf_export('metrics.precision')\ndef precision(labels,\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the precision of the predictions with respect to the labels.\n\n The `precision` function creates two local variables,\n `true_positives` and `false_positives`, that are used to compute the\n precision. This value is ultimately returned as `precision`, an idempotent\n operation that simply divides `true_positives` by the sum of `true_positives`\n and `false_positives`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision`. `update_op` weights each prediction by the corresponding value in\n `weights`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. Will be cast to `bool`.\n predictions: The predicted values, a `Tensor` of arbitrary dimensions. 
Will\n be cast to `bool`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `precision` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n precision: Scalar float `Tensor` with the value of `true_positives`\n divided by the sum of `true_positives` and `false_positives`.\n update_op: `Operation` that increments `true_positives` and\n `false_positives` variables appropriately and whose value matches\n `precision`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.precision is not '\n 'supported when eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'precision',\n (predictions, labels, weights)):\n\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=math_ops.cast(predictions, dtype=dtypes.bool),\n labels=math_ops.cast(labels, dtype=dtypes.bool),\n weights=weights)\n\n true_p, true_positives_update_op = true_positives(\n labels,\n predictions,\n weights,\n metrics_collections=None,\n updates_collections=None,\n name=None)\n false_p, false_positives_update_op = false_positives(\n labels,\n predictions,\n weights,\n metrics_collections=None,\n updates_collections=None,\n name=None)\n\n def compute_precision(tp, fp, name):\n return array_ops.where(\n math_ops.greater(tp + fp, 0), math_ops.div(tp, tp + fp), 0, name)\n\n def once_across_replicas(_, true_p, false_p):\n return compute_precision(true_p, false_p, 'value')\n\n p = _aggregate_across_replicas(metrics_collections, once_across_replicas,\n true_p, false_p)\n\n update_op = compute_precision(true_positives_update_op,\n false_positives_update_op, 'update_op')\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return p, update_op\n\n\n@tf_export('metrics.precision_at_thresholds')\ndef precision_at_thresholds(labels,\n predictions,\n thresholds,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes precision values for different `thresholds` on `predictions`.\n\n The `precision_at_thresholds` function creates four local variables,\n `true_positives`, `true_negatives`, `false_positives` and `false_negatives`\n for various values of thresholds. `precision[i]` is defined as the total\n weight of values in `predictions` above `thresholds[i]` whose corresponding\n entry in `labels` is `True`, divided by the total weight of values in\n `predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +\n false_positives[i])`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. 
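# --- Editor's usage sketch (illustrative, not from the original file),
# assuming TF 1.x graph mode and toy binary predictions.
def _example_precision():
  import tensorflow as tf
  labels = tf.constant([True, False, True, False])
  predictions = tf.constant([True, True, True, False])
  prec, update_op = tf.metrics.precision(labels, predictions)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(prec)  # 2 TP / (2 TP + 1 FP) ~= 0.667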
Will be cast to `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `precision` should be\n added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n precision: A float `Tensor` of shape `[len(thresholds)]`.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables that\n are used in the computation of `precision`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.precision_at_thresholds is not '\n 'supported when eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'precision_at_thresholds',\n (predictions, labels, weights)):\n values, update_ops = _confusion_matrix_at_thresholds(\n labels, predictions, thresholds, weights, includes=('tp', 'fp'))\n\n # Avoid division by zero.\n epsilon = 1e-7\n\n def compute_precision(tp, fp, name):\n return math_ops.div(tp, epsilon + tp + fp, name='precision_' + name)\n\n def precision_across_replicas(_, values):\n return compute_precision(values['tp'], values['fp'], 'value')\n\n prec = _aggregate_across_replicas(\n metrics_collections, precision_across_replicas, values)\n\n update_op = compute_precision(update_ops['tp'], update_ops['fp'],\n 'update_op')\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return prec, update_op\n\n\n@tf_export('metrics.recall')\ndef recall(labels,\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the recall of the predictions with respect to the labels.\n\n The `recall` function creates two local variables, `true_positives`\n and `false_negatives`, that are used to compute the recall. This value is\n ultimately returned as `recall`, an idempotent operation that simply divides\n `true_positives` by the sum of `true_positives` and `false_negatives`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` that updates these variables and returns the `recall`. `update_op`\n weights each prediction by the corresponding value in `weights`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. Will be cast to `bool`.\n predictions: The predicted values, a `Tensor` of arbitrary dimensions.
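# --- Editor's usage sketch (illustrative, not from the original file). One
# precision value is produced per threshold; toy scores assumed.
def _example_precision_at_thresholds():
  import tensorflow as tf
  labels = tf.constant([True, False, True])
  scores = tf.constant([0.9, 0.6, 0.2])
  prec, update_op = tf.metrics.precision_at_thresholds(
      labels, scores, thresholds=[0.5, 0.8])
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(prec)  # ~[0.5, 1.0] (an epsilon guards the denominator)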
Will\n be cast to `bool`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `recall` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n recall: Scalar float `Tensor` with the value of `true_positives` divided\n by the sum of `true_positives` and `false_negatives`.\n update_op: `Operation` that increments `true_positives` and\n `false_negatives` variables appropriately and whose value matches\n `recall`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.recall is not supported when '\n 'eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'recall',\n (predictions, labels, weights)):\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=math_ops.cast(predictions, dtype=dtypes.bool),\n labels=math_ops.cast(labels, dtype=dtypes.bool),\n weights=weights)\n\n true_p, true_positives_update_op = true_positives(\n labels,\n predictions,\n weights,\n metrics_collections=None,\n updates_collections=None,\n name=None)\n false_n, false_negatives_update_op = false_negatives(\n labels,\n predictions,\n weights,\n metrics_collections=None,\n updates_collections=None,\n name=None)\n\n def compute_recall(true_p, false_n, name):\n return array_ops.where(\n math_ops.greater(true_p + false_n, 0),\n math_ops.div(true_p, true_p + false_n), 0, name)\n\n def once_across_replicas(_, true_p, false_n):\n return compute_recall(true_p, false_n, 'value')\n\n rec = _aggregate_across_replicas(\n metrics_collections, once_across_replicas, true_p, false_n)\n\n update_op = compute_recall(true_positives_update_op,\n false_negatives_update_op, 'update_op')\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return rec, update_op\n\n\ndef _at_k_name(name, k=None, class_id=None):\n if k is not None:\n name = '%s_at_%d' % (name, k)\n else:\n name = '%s_at_k' % (name)\n if class_id is not None:\n name = '%s_class%d' % (name, class_id)\n return name\n\n\ndef _select_class_id(ids, selected_id):\n \"\"\"Filter all but `selected_id` out of `ids`.\n\n Args:\n ids: `int64` `Tensor` or `SparseTensor` of IDs.\n selected_id: Int id to select.\n\n Returns:\n `SparseTensor` of same dimensions as `ids`.
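# --- Editor's usage sketch (illustrative, not from the original file),
# mirroring the `precision` example above with toy binary data.
def _example_recall():
  import tensorflow as tf
  labels = tf.constant([True, True, False, True])
  predictions = tf.constant([True, False, False, True])
  rec, update_op = tf.metrics.recall(labels, predictions)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(rec)  # 2 TP / (2 TP + 1 FN) ~= 0.667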
This contains only the entries\n equal to `selected_id`.\n \"\"\"\n ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)\n if isinstance(ids, sparse_tensor.SparseTensor):\n return sparse_ops.sparse_retain(ids, math_ops.equal(ids.values,\n selected_id))\n\n # TODO(ptucker): Make this more efficient, maybe add a sparse version of\n # tf.equal and tf.reduce_any?\n\n # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.\n ids_shape = array_ops.shape(ids, out_type=dtypes.int64)\n ids_last_dim = array_ops.size(ids_shape) - 1\n filled_selected_id_shape = math_ops.reduced_shape(ids_shape,\n array_ops.reshape(\n ids_last_dim, [1]))\n\n # Intersect `ids` with the selected ID.\n filled_selected_id = array_ops.fill(filled_selected_id_shape,\n math_ops.to_int64(selected_id))\n result = sets.set_intersection(filled_selected_id, ids)\n return sparse_tensor.SparseTensor(\n indices=result.indices, values=result.values, dense_shape=ids_shape)\n\n\ndef _maybe_select_class_id(labels, predictions_idx, selected_id=None):\n \"\"\"If class ID is specified, filter all other classes.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]\n where N >= 1. Commonly, N=1 and `predictions_idx` has shape\n [batch size, k].\n selected_id: Int id to select.\n\n Returns:\n Tuple of `labels` and `predictions_idx`, possibly with classes removed.\n \"\"\"\n if selected_id is None:\n return labels, predictions_idx\n return (_select_class_id(labels, selected_id),\n _select_class_id(predictions_idx, selected_id))\n\n\ndef _sparse_true_positive_at_k(labels,\n predictions_idx,\n class_id=None,\n weights=None,\n name=None):\n \"\"\"Calculates true positives for recall@k and precision@k.\n\n If `class_id` is specified, calculate binary true positives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n class_id: Class for which we want binary metrics.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n name: Name of operation.\n\n Returns:\n A [D1, ... 
DN] `Tensor` of true positive counts.\n \"\"\"\n with ops.name_scope(name, 'true_positives',\n (predictions_idx, labels, weights)):\n labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,\n class_id)\n tp = sets.set_size(sets.set_intersection(predictions_idx, labels))\n tp = math_ops.to_double(tp)\n if weights is not None:\n with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(\n weights, tp),)):\n weights = math_ops.to_double(weights)\n tp = math_ops.multiply(tp, weights)\n return tp\n\n\ndef _streaming_sparse_true_positive_at_k(labels,\n predictions_idx,\n k=None,\n class_id=None,\n weights=None,\n name=None):\n \"\"\"Calculates weighted per step true positives for recall@k and precision@k.\n\n If `class_id` is specified, calculate binary true positives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n k: Integer, k for @k metric. This is only used for default op name.\n class_id: Class for which we want binary metrics.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n name: Name of new variable, and namespace for other dependent ops.\n\n Returns:\n A tuple of `Variable` and update `Operation`.\n\n Raises:\n ValueError: If `weights` is not `None` and has an incompatible shape.\n \"\"\"\n with ops.name_scope(name, _at_k_name('true_positive', k, class_id=class_id),\n (predictions_idx, labels, weights)) as scope:\n tp = _sparse_true_positive_at_k(\n predictions_idx=predictions_idx,\n labels=labels,\n class_id=class_id,\n weights=weights)\n batch_total_tp = math_ops.to_double(math_ops.reduce_sum(tp))\n\n var = metric_variable([], dtypes.float64, name=scope)\n return var, state_ops.assign_add(var, batch_total_tp, name='update')\n\n\ndef _sparse_false_negative_at_k(labels,\n predictions_idx,\n class_id=None,\n weights=None):\n \"\"\"Calculates false negatives for recall@k.\n\n If `class_id` is specified, calculate binary false negatives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n class_id: Class for which we want binary metrics.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n\n Returns:\n A [D1, ... DN] `Tensor` of false negative counts.\n \"\"\"\n with ops.name_scope(None, 'false_negatives',\n (predictions_idx, labels, weights)):\n labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,\n class_id)\n fn = sets.set_size(\n sets.set_difference(predictions_idx, labels, aminusb=False))\n fn = math_ops.to_double(fn)\n if weights is not None:\n with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(\n weights, fn),)):\n weights = math_ops.to_double(weights)\n fn = math_ops.multiply(fn, weights)\n return fn\n\n\ndef _streaming_sparse_false_negative_at_k(labels,\n predictions_idx,\n k,\n class_id=None,\n weights=None,\n name=None):\n \"\"\"Calculates weighted per step false negatives for recall@k.\n\n If `class_id` is specified, calculate binary false negatives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n k: Integer, k for @k metric. This is only used for default op name.\n class_id: Class for which we want binary metrics.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n name: Name of new variable, and namespace for other dependent ops.\n\n Returns:\n A tuple of `Variable` and update `Operation`.\n\n Raises:\n ValueError: If `weights` is not `None` and has an incompatible shape.\n \"\"\"\n with ops.name_scope(name, _at_k_name('false_negative', k, class_id=class_id),\n (predictions_idx, labels, weights)) as scope:\n fn = _sparse_false_negative_at_k(\n predictions_idx=predictions_idx,\n labels=labels,\n class_id=class_id,\n weights=weights)\n batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn))\n\n var = metric_variable([], dtypes.float64, name=scope)\n return var, state_ops.assign_add(var, batch_total_fn, name='update')\n\n\n@tf_export('metrics.recall_at_k')\ndef recall_at_k(labels,\n predictions,\n k,\n class_id=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes recall@k of the predictions with respect to sparse labels.\n\n If `class_id` is specified, we calculate recall by considering only the\n entries in the batch for which `class_id` is in the label, and computing\n the fraction of them for which `class_id` is in the top-k `predictions`.\n If `class_id` is not specified, we'll calculate recall as how often on\n average a class among the labels of a batch entry is in the top-k\n `predictions`.\n\n `sparse_recall_at_k` creates two local variables,\n `true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute\n the recall_at_k frequency.
This frequency is ultimately returned as\n `recall_at_<k>`: an idempotent operation that simply divides\n `true_positive_at_<k>` by total (`true_positive_at_<k>` +\n `false_negative_at_<k>`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`\n indicating the top `k` `predictions`. Set operations applied to `top_k` and\n `labels` calculate the true positives and false negatives weighted by\n `weights`. Then `update_op` increments `true_positive_at_<k>` and\n `false_negative_at_<k>` using these values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies\n num_labels=1. N >= 1 and num_labels is the number of target classes for\n the associated prediction. Commonly, N=1 and `labels` has shape\n [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values\n should be in range [0, num_classes), where num_classes is the last\n dimension of `predictions`. Values outside this range always count\n towards `false_negative_at_<k>`.\n predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where\n N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].\n The final dimension contains the logit values for each class. [D1, ... DN]\n must match `labels`.\n k: Integer, k for @k metric.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes), where num_classes is the last dimension of\n `predictions`. If class_id is outside this range, the method returns NAN.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. 
If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n recall: Scalar `float64` `Tensor` with the value of `true_positives` divided\n by the sum of `true_positives` and `false_negatives`.\n update_op: `Operation` that increments `true_positives` and\n `false_negatives` variables appropriately, and whose value matches\n `recall`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match\n `predictions`, or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.recall_at_k is not '\n 'supported when eager execution is enabled.')\n\n with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id),\n (predictions, labels, weights)) as scope:\n _, top_k_idx = nn.top_k(predictions, k)\n return recall_at_top_k(\n labels=labels,\n predictions_idx=top_k_idx,\n k=k,\n class_id=class_id,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=scope)\n\n\n@tf_export('metrics.recall_at_top_k')\ndef recall_at_top_k(labels,\n predictions_idx,\n k=None,\n class_id=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes recall@k of top-k predictions with respect to sparse labels.\n\n Differs from `recall_at_k` in that predictions must be in the form of top `k`\n class indices, whereas `recall_at_k` expects logits. Refer to `recall_at_k`\n for more details.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies\n num_labels=1. N >= 1 and num_labels is the number of target classes for\n the associated prediction. Commonly, N=1 and `labels` has shape\n [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values\n should be in range [0, num_classes), where num_classes is the last\n dimension of `predictions`. Values outside this range always count\n towards `false_negative_at_<k>`.\n predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.\n Commonly, N=1 and predictions has shape [batch size, k]. The final\n dimension contains the top `k` predicted class indices. [D1, ... DN] must\n match `labels`.\n k: Integer, k for @k metric. Only used for the default op name.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes), where num_classes is the last dimension of\n `predictions`. If class_id is outside this range, the method returns NAN.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. 
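# --- Editor's usage sketch (illustrative, not from the original file).
# `recall_at_k` takes raw logits and computes the top-k set internally; the
# logits and labels below are made up.
def _example_recall_at_k():
  import tensorflow as tf
  labels = tf.constant([0, 0], dtype=tf.int64)  # one target class per example
  logits = tf.constant([[0.6, 0.3, 0.1],
                        [0.1, 0.2, 0.7]])       # [batch_size, num_classes]
  rec, update_op = tf.metrics.recall_at_k(labels, logits, k=2)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)   # top-2 sets are {0, 1} and {2, 1}; only row 0 hits
    return sess.run(rec)  # 0.5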
If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n recall: Scalar `float64` `Tensor` with the value of `true_positives` divided\n by the sum of `true_positives` and `false_negatives`.\n update_op: `Operation` that increments `true_positives` and\n `false_negatives` variables appropriately, and whose value matches\n `recall`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match\n `predictions`, or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n \"\"\"\n with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id),\n (predictions_idx, labels, weights)) as scope:\n labels = _maybe_expand_labels(labels, predictions_idx)\n top_k_idx = math_ops.to_int64(predictions_idx)\n tp, tp_update = _streaming_sparse_true_positive_at_k(\n predictions_idx=top_k_idx,\n labels=labels,\n k=k,\n class_id=class_id,\n weights=weights)\n fn, fn_update = _streaming_sparse_false_negative_at_k(\n predictions_idx=top_k_idx,\n labels=labels,\n k=k,\n class_id=class_id,\n weights=weights)\n\n def compute_recall(_, tp, fn):\n return math_ops.div(tp, math_ops.add(tp, fn), name=scope)\n\n metric = _aggregate_across_replicas(\n metrics_collections, compute_recall, tp, fn)\n\n update = math_ops.div(\n tp_update, math_ops.add(tp_update, fn_update), name='update')\n if updates_collections:\n ops.add_to_collections(updates_collections, update)\n return metric, update\n\n\n@tf_export('metrics.recall_at_thresholds')\ndef recall_at_thresholds(labels,\n predictions,\n thresholds,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes various recall values for different `thresholds` on `predictions`.\n\n The `recall_at_thresholds` function creates four local variables,\n `true_positives`, `true_negatives`, `false_positives` and `false_negatives`\n for various values of thresholds. `recall[i]` is defined as the total weight\n of values in `predictions` above `thresholds[i]` whose corresponding entry in\n `labels` is `True`, divided by the total weight of `True` values in `labels`\n (`true_positives[i] / (true_positives[i] + false_negatives[i])`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `recall`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. 
Will be cast to `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `recall` should be\n added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n recall: A float `Tensor` of shape `[len(thresholds)]`.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables that\n are used in the computation of `recall`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.recall_at_thresholds is not '\n 'supported when eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'recall_at_thresholds',\n (predictions, labels, weights)):\n values, update_ops = _confusion_matrix_at_thresholds(\n labels, predictions, thresholds, weights, includes=('tp', 'fn'))\n\n # Avoid division by zero.\n epsilon = 1e-7\n\n def compute_recall(tp, fn, name):\n return math_ops.div(tp, epsilon + tp + fn, name='recall_' + name)\n\n def recall_across_replicas(_, values):\n return compute_recall(values['tp'], values['fn'], 'value')\n\n rec = _aggregate_across_replicas(\n metrics_collections, recall_across_replicas, values)\n\n update_op = compute_recall(update_ops['tp'], update_ops['fn'], 'update_op')\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return rec, update_op\n\n\n@tf_export('metrics.root_mean_squared_error')\ndef root_mean_squared_error(labels,\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the root mean squared error between the labels and predictions.\n\n The `root_mean_squared_error` function creates two local variables,\n `total` and `count` that are used to compute the root mean squared error.\n This average is weighted by `weights`, and it is ultimately returned as\n `root_mean_squared_error`: an idempotent operation that takes the square root\n of the division of `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `root_mean_squared_error`. Internally, a `squared_error` operation computes\n the element-wise square of the difference between `predictions` and `labels`.\n Then `update_op` increments `total` with the reduced sum of the product of\n `weights` and `squared_error`, and it increments `count` with the reduced sum\n of `weights`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` of the same shape as `predictions`.\n predictions: A `Tensor` of arbitrary shape.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that\n `root_mean_squared_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n root_mean_squared_error: A `Tensor` representing the current mean, the value\n of `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `root_mean_squared_error`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.root_mean_squared_error is not '\n 'supported when eager execution is enabled.')\n\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=predictions, labels=labels, weights=weights)\n mse, update_mse_op = mean_squared_error(labels, predictions, weights, None,\n None, name or\n 'root_mean_squared_error')\n\n once_across_replicas = lambda _, mse: math_ops.sqrt(mse)\n rmse = _aggregate_across_replicas(\n metrics_collections, once_across_replicas, mse)\n\n update_rmse_op = math_ops.sqrt(update_mse_op)\n if updates_collections:\n ops.add_to_collections(updates_collections, update_rmse_op)\n\n return rmse, update_rmse_op\n\n\n@tf_export('metrics.sensitivity_at_specificity')\ndef sensitivity_at_specificity(labels,\n predictions,\n specificity,\n weights=None,\n num_thresholds=200,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the sensitivity at a given specificity.\n\n The `sensitivity_at_specificity` function creates four local\n variables, `true_positives`, `true_negatives`, `false_positives` and\n `false_negatives` that are used to compute the sensitivity at the given\n specificity value. The threshold for the given specificity value is computed\n and used to evaluate the corresponding sensitivity.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,\n `false_positives` and `false_negatives` counts with the weight of each case\n found in the `predictions` and `labels`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n For additional information about specificity and sensitivity, see the\n following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity\n\n Args:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`.
Will be cast to `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n specificity: A scalar value in range `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n num_thresholds: The number of thresholds to use for matching the given\n specificity.\n metrics_collections: An optional list of collections that `sensitivity`\n should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n sensitivity: A scalar `Tensor` representing the sensitivity at the given\n `specificity` value.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables\n appropriately and whose value matches `sensitivity`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n `specificity` is not between 0 and 1, or if either `metrics_collections`\n or `updates_collections` are not a list or tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.sensitivity_at_specificity is not '\n 'supported when eager execution is enabled.')\n\n if specificity < 0 or specificity > 1:\n raise ValueError('`specificity` must be in the range [0, 1].')\n\n with variable_scope.variable_scope(name, 'sensitivity_at_specificity',\n (predictions, labels, weights)):\n kepsilon = 1e-7 # to account for floating point imprecisions\n thresholds = [\n (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)\n ]\n thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]\n\n values, update_ops = _confusion_matrix_at_thresholds(\n labels, predictions, thresholds, weights)\n\n def compute_sensitivity_at_specificity(tp, tn, fp, fn, name):\n specificities = math_ops.div(tn, tn + fp + kepsilon)\n tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)\n tf_index = math_ops.cast(tf_index, dtypes.int32)\n\n # Now, we have the implicit threshold, so compute the sensitivity:\n return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + kepsilon,\n name)\n\n def sensitivity_across_replicas(_, values):\n return compute_sensitivity_at_specificity(\n values['tp'], values['tn'], values['fp'], values['fn'], 'value')\n\n sensitivity = _aggregate_across_replicas(\n metrics_collections, sensitivity_across_replicas, values)\n\n update_op = compute_sensitivity_at_specificity(\n update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],\n 'update_op')\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return sensitivity, update_op\n\n\ndef _expand_and_tile(tensor, multiple, dim=0, name=None):\n \"\"\"Slice `tensor` shape in 2, then tile along the sliced dimension.\n\n A new dimension is inserted in shape of `tensor` before `dim`, then values are\n tiled `multiple` times along the new dimension.\n\n Args:\n tensor: Input `Tensor` or `SparseTensor`.\n multiple: Integer, number of times to tile.\n dim: Integer, dimension along which to tile.\n name: Name of operation.\n\n Returns:\n `Tensor` result of expanding and tiling `tensor`.\n\n Raises:\n ValueError: if `multiple` is less than 1, or `dim` is 
not in\n `[-rank(tensor), rank(tensor)]`.\n \"\"\"\n if multiple < 1:\n raise ValueError('Invalid multiple %s, must be > 0.' % multiple)\n with ops.name_scope(name, 'expand_and_tile',\n (tensor, multiple, dim)) as scope:\n # Sparse.\n tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)\n if isinstance(tensor, sparse_tensor.SparseTensor):\n if dim < 0:\n expand_dims = array_ops.reshape(\n array_ops.size(tensor.dense_shape) + dim, [1])\n else:\n expand_dims = [dim]\n expanded_shape = array_ops.concat(\n (array_ops.slice(tensor.dense_shape, [0], expand_dims), [1],\n array_ops.slice(tensor.dense_shape, expand_dims, [-1])),\n 0,\n name='expanded_shape')\n expanded = sparse_ops.sparse_reshape(\n tensor, shape=expanded_shape, name='expand')\n if multiple == 1:\n return expanded\n return sparse_ops.sparse_concat(\n dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)\n\n # Dense.\n expanded = array_ops.expand_dims(\n tensor, dim if (dim >= 0) else (dim - 1), name='expand')\n if multiple == 1:\n return expanded\n ones = array_ops.ones_like(array_ops.shape(tensor))\n tile_multiples = array_ops.concat(\n (ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')\n return array_ops.tile(expanded, tile_multiples, name=scope)\n\n\ndef _num_relevant(labels, k):\n \"\"\"Computes number of relevant values for each row in labels.\n\n For labels with shape [D1, ... DN, num_labels], this is the minimum of\n `num_labels` and `k`.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels].\n k: Integer, k for @k metric.\n\n Returns:\n Integer `Tensor` of shape [D1, ... DN], where each value is the number of\n relevant values for that row.\n\n Raises:\n ValueError: if inputs have invalid dtypes or values.\n \"\"\"\n if k < 1:\n raise ValueError('Invalid k=%s.' % k)\n with ops.name_scope(None, 'num_relevant', (labels,)) as scope:\n # For SparseTensor, calculate separate count for each row.\n labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)\n if isinstance(labels, sparse_tensor.SparseTensor):\n return math_ops.minimum(sets.set_size(labels), k, name=scope)\n\n # For dense Tensor, calculate scalar count based on last dimension, and\n # tile across labels shape.\n labels_shape = array_ops.shape(labels)\n labels_size = labels_shape[-1]\n num_relevant_scalar = math_ops.minimum(labels_size, k)\n return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)\n\n\ndef _sparse_average_precision_at_top_k(labels, predictions_idx):\n \"\"\"Computes average precision@k of predictions with respect to sparse labels.\n\n From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula\n for each row is:\n\n AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items\n\n A \"row\" is the elements in dimension [D1, ... DN] of `predictions_idx`,\n `labels`, and the result `Tensors`. In the common case, this is [batch_size].\n Each row of the results contains the average precision for that row.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies\n num_labels=1. N >= 1 and num_labels is the number of target classes for\n the associated prediction. Commonly, N=1 and `labels` has shape\n [batch_size, num_labels]. [D1, ... 
DN] must match `predictions_idx`.\n Values should be in range [0, num_classes).\n predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.\n Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final\n dimension must be set and contains the top `k` predicted class indices.\n [D1, ... DN] must match `labels`. Values should be in range\n [0, num_classes).\n\n Returns:\n `float64` `Tensor` of shape [D1, ... DN], where each value is the average\n precision for that row.\n\n Raises:\n ValueError: if the last dimension of predictions_idx is not set.\n \"\"\"\n with ops.name_scope(None, 'average_precision',\n (predictions_idx, labels)) as scope:\n predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx')\n if predictions_idx.get_shape().ndims == 0:\n raise ValueError('The rank of predictions_idx must be at least 1.')\n k = predictions_idx.get_shape().as_list()[-1]\n if k is None:\n raise ValueError('The last dimension of predictions_idx must be set.')\n labels = _maybe_expand_labels(labels, predictions_idx)\n\n # Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate\n # prediction for each k, so we can calculate separate true positive values\n # for each k.\n predictions_idx_per_k = array_ops.expand_dims(\n predictions_idx, -1, name='predictions_idx_per_k')\n\n # Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor.\n labels_per_k = _expand_and_tile(\n labels, multiple=k, dim=-1, name='labels_per_k')\n\n # The following tensors are all of shape [D1, ... DN, k], containing values\n # per row, per k value.\n # `relevant_per_k` (int32) - Relevance indicator, 1 if the prediction at\n # that k value is correct, 0 otherwise. This is the \"rel_{i}\" term from\n # the formula above.\n # `tp_per_k` (int32) - True positive counts.\n # `retrieved_per_k` (int32) - Number of predicted values at each k. This is\n # the precision denominator.\n # `precision_per_k` (float64) - Precision at each k. This is the \"P_{i}\"\n # term from the formula above.\n # `relevant_precision_per_k` (float64) - Relevant precisions; i.e.,\n # precisions at all k for which relevance indicator is true.\n relevant_per_k = _sparse_true_positive_at_k(\n labels_per_k, predictions_idx_per_k, name='relevant_per_k')\n tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k')\n retrieved_per_k = math_ops.cumsum(\n array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')\n precision_per_k = math_ops.div(\n math_ops.to_double(tp_per_k),\n math_ops.to_double(retrieved_per_k),\n name='precision_per_k')\n relevant_precision_per_k = math_ops.multiply(\n precision_per_k,\n math_ops.to_double(relevant_per_k),\n name='relevant_precision_per_k')\n\n # Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.\n precision_sum = math_ops.reduce_sum(\n relevant_precision_per_k, reduction_indices=(-1,), name='precision_sum')\n\n # Divide by number of relevant items to get average precision. 
These are\n # the \"num_relevant_items\" and \"AveP\" terms from the formula above.\n num_relevant_items = math_ops.to_double(_num_relevant(labels, k))\n return math_ops.div(precision_sum, num_relevant_items, name=scope)\n\n\ndef _streaming_sparse_average_precision_at_top_k(labels,\n predictions_idx,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes average precision@k of predictions with respect to sparse labels.\n\n `sparse_average_precision_at_top_k` creates two local variables,\n `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that\n are used to compute the frequency. This frequency is ultimately returned as\n `average_precision_at_<k>`: an idempotent operation that simply divides\n `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate\n the true positives and false positives weighted by `weights`. Then `update_op`\n increments `true_positive_at_<k>` and `false_positive_at_<k>` using these\n values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies\n num_labels=1. N >= 1 and num_labels is the number of target classes for\n the associated prediction. Commonly, N=1 and `labels` has shape\n [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`.\n Values should be in range [0, num_classes).\n predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.\n Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final\n dimension contains the top `k` predicted class indices. [D1, ... DN] must\n match `labels`. Values should be in range [0, num_classes).\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n mean_average_precision: Scalar `float64` `Tensor` with the mean average\n precision values.\n update: `Operation` that increments variables appropriately, and whose\n value matches `metric`.\n \"\"\"\n with ops.name_scope(name, 'average_precision_at_top_k',\n (predictions_idx, labels, weights)) as scope:\n # Calculate per-example average precision, and apply weights.\n average_precision = _sparse_average_precision_at_top_k(\n predictions_idx=predictions_idx, labels=labels)\n if weights is not None:\n weights = weights_broadcast_ops.broadcast_weights(\n math_ops.to_double(weights), average_precision)\n average_precision = math_ops.multiply(average_precision, weights)\n\n # Create accumulation variables and update ops for max average precision and\n # total average precision.\n with ops.name_scope(None, 'max', (average_precision,)) as max_scope:\n # `max` is the max possible precision. 
Since max for any row is 1.0:\n # - For the unweighted case, this is just the number of rows.\n # - For the weighted case, it's the sum of the weights broadcast across\n # `average_precision` rows.\n max_var = metric_variable([], dtypes.float64, name=max_scope)\n if weights is None:\n batch_max = math_ops.to_double(\n array_ops.size(average_precision, name='batch_max'))\n else:\n batch_max = math_ops.reduce_sum(weights, name='batch_max')\n max_update = state_ops.assign_add(max_var, batch_max, name='update')\n with ops.name_scope(None, 'total', (average_precision,)) as total_scope:\n total_var = metric_variable([], dtypes.float64, name=total_scope)\n batch_total = math_ops.reduce_sum(average_precision, name='batch_total')\n total_update = state_ops.assign_add(total_var, batch_total, name='update')\n\n # Divide total by max to get mean, for both vars and the update ops.\n def precision_across_replicas(_, total_var, max_var):\n return _safe_scalar_div(total_var, max_var, name='mean')\n\n mean_average_precision = _aggregate_across_replicas(\n metrics_collections, precision_across_replicas, total_var, max_var)\n\n update = _safe_scalar_div(total_update, max_update, name=scope)\n if updates_collections:\n ops.add_to_collections(updates_collections, update)\n\n return mean_average_precision, update\n\n\n@tf_export('metrics.sparse_average_precision_at_k')\n@deprecated(None, 'Use average_precision_at_k instead')\ndef sparse_average_precision_at_k(labels,\n predictions,\n k,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Renamed to `average_precision_at_k`, please use that method instead.\"\"\"\n return average_precision_at_k(\n labels=labels,\n predictions=predictions,\n k=k,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n@tf_export('metrics.average_precision_at_k')\ndef average_precision_at_k(labels,\n predictions,\n k,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes average precision@k of predictions with respect to sparse labels.\n\n `average_precision_at_k` creates two local variables,\n `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that\n are used to compute the frequency. This frequency is ultimately returned as\n `average_precision_at_<k>`: an idempotent operation that simply divides\n `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`\n indicating the top `k` `predictions`. Set operations applied to `top_k` and\n `labels` calculate the true positives and false positives weighted by\n `weights`. Then `update_op` increments `true_positive_at_<k>` and\n `false_positive_at_<k>` using these values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies\n num_labels=1. N >= 1 and num_labels is the number of target classes for\n the associated prediction. Commonly, N=1 and `labels` has shape\n [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values\n should be in range [0, num_classes), where num_classes is the last\n dimension of `predictions`. 
Values outside this range are ignored.\n predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where\n N >= 1. Commonly, N=1 and `predictions` has shape\n [batch size, num_classes]. The final dimension contains the logit values\n for each class. [D1, ... DN] must match `labels`.\n k: Integer, k for @k metric. This will calculate an average precision for\n range `[1,k]`, as documented above.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n mean_average_precision: Scalar `float64` `Tensor` with the mean average\n precision values.\n update: `Operation` that increments variables appropriately, and whose\n value matches `metric`.\n\n Raises:\n ValueError: if k is invalid.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.sparse_average_precision_at_k is not '\n 'supported when eager execution is enabled.')\n\n if k < 1:\n raise ValueError('Invalid k=%s.' % k)\n with ops.name_scope(name, _at_k_name('average_precision', k),\n (predictions, labels, weights)) as scope:\n # Calculate top k indices to produce [D1, ... DN, k] tensor.\n _, predictions_idx = nn.top_k(predictions, k)\n return _streaming_sparse_average_precision_at_top_k(\n labels=labels,\n predictions_idx=predictions_idx,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=scope)\n\n\ndef _sparse_false_positive_at_k(labels,\n predictions_idx,\n class_id=None,\n weights=None):\n \"\"\"Calculates false positives for precision@k.\n\n If `class_id` is specified, calculate binary true positives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n class_id: Class for which we want binary metrics.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n\n Returns:\n A [D1, ... 
DN] `Tensor` of false positive counts.\n \"\"\"\n with ops.name_scope(None, 'false_positives',\n (predictions_idx, labels, weights)):\n labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,\n class_id)\n fp = sets.set_size(\n sets.set_difference(predictions_idx, labels, aminusb=True))\n fp = math_ops.to_double(fp)\n if weights is not None:\n with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(\n weights, fp),)):\n weights = math_ops.to_double(weights)\n fp = math_ops.multiply(fp, weights)\n return fp\n\n\ndef _streaming_sparse_false_positive_at_k(labels,\n predictions_idx,\n k=None,\n class_id=None,\n weights=None,\n name=None):\n \"\"\"Calculates weighted per step false positives for precision@k.\n\n If `class_id` is specified, calculate binary true positives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n k: Integer, k for @k metric. This is only used for default op name.\n class_id: Class for which we want binary metrics.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n name: Name of new variable, and namespace for other dependent ops.\n\n Returns:\n A tuple of `Variable` and update `Operation`.\n\n Raises:\n ValueError: If `weights` is not `None` and has an incompatible shape.\n \"\"\"\n with ops.name_scope(name, _at_k_name('false_positive', k, class_id=class_id),\n (predictions_idx, labels, weights)) as scope:\n fp = _sparse_false_positive_at_k(\n predictions_idx=predictions_idx,\n labels=labels,\n class_id=class_id,\n weights=weights)\n batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp))\n\n var = metric_variable([], dtypes.float64, name=scope)\n return var, state_ops.assign_add(var, batch_total_fp, name='update')\n\n\n@tf_export('metrics.precision_at_top_k')\ndef precision_at_top_k(labels,\n predictions_idx,\n k=None,\n class_id=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes precision@k of the predictions with respect to sparse labels.\n\n Differs from `sparse_precision_at_k` in that predictions must be in the form\n of top `k` class indices, whereas `sparse_precision_at_k` expects logits.\n Refer to `sparse_precision_at_k` for more details.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies\n num_labels=1. N >= 1 and num_labels is the number of target classes for\n the associated prediction. Commonly, N=1 and `labels` has shape\n [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values\n should be in range [0, num_classes), where num_classes is the last\n dimension of `predictions`. 
Values outside this range are ignored.\n predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where\n N >= 1. Commonly, N=1 and predictions has shape [batch size, k].\n The final dimension contains the top `k` predicted class indices.\n [D1, ... DN] must match `labels`.\n k: Integer, k for @k metric. Only used for the default op name.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes], where num_classes is the last dimension of\n `predictions`. If `class_id` is outside this range, the method returns\n NAN.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n precision: Scalar `float64` `Tensor` with the value of `true_positives`\n divided by the sum of `true_positives` and `false_positives`.\n update_op: `Operation` that increments `true_positives` and\n `false_positives` variables appropriately, and whose value matches\n `precision`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match\n `predictions`, or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.precision_at_top_k is not '\n 'supported when eager execution is enabled.')\n\n with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),\n (predictions_idx, labels, weights)) as scope:\n labels = _maybe_expand_labels(labels, predictions_idx)\n top_k_idx = math_ops.to_int64(predictions_idx)\n tp, tp_update = _streaming_sparse_true_positive_at_k(\n predictions_idx=top_k_idx,\n labels=labels,\n k=k,\n class_id=class_id,\n weights=weights)\n fp, fp_update = _streaming_sparse_false_positive_at_k(\n predictions_idx=top_k_idx,\n labels=labels,\n k=k,\n class_id=class_id,\n weights=weights)\n\n def precision_across_replicas(_, tp, fp):\n return math_ops.div(tp, math_ops.add(tp, fp), name=scope)\n\n metric = _aggregate_across_replicas(\n metrics_collections, precision_across_replicas, tp, fp)\n\n update = math_ops.div(\n tp_update, math_ops.add(tp_update, fp_update), name='update')\n if updates_collections:\n ops.add_to_collections(updates_collections, update)\n return metric, update\n\n\n@tf_export('metrics.sparse_precision_at_k')\n@deprecated(None, 'Use precision_at_k instead')\ndef sparse_precision_at_k(labels,\n predictions,\n k,\n class_id=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Renamed to `precision_at_k`, please use that method instead.\"\"\"\n return precision_at_k(\n labels=labels,\n predictions=predictions,\n k=k,\n class_id=class_id,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n@tf_export('metrics.precision_at_k')\ndef precision_at_k(labels,\n predictions,\n k,\n class_id=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes precision@k of the predictions with respect to sparse labels.\n\n If `class_id` is specified, we calculate precision 
by considering only the\n entries in the batch for which `class_id` is in the top-k highest\n `predictions`, and computing the fraction of them for which `class_id` is\n indeed a correct label.\n If `class_id` is not specified, we'll calculate precision as how often on\n average a class among the top-k classes with the highest predicted values\n of a batch entry is correct and can be found in the label for that entry.\n\n `precision_at_k` creates two local variables,\n `true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute\n the precision@k frequency. This frequency is ultimately returned as\n `precision_at_<k>`: an idempotent operation that simply divides\n `true_positive_at_<k>` by total (`true_positive_at_<k>` +\n `false_positive_at_<k>`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`\n indicating the top `k` `predictions`. Set operations applied to `top_k` and\n `labels` calculate the true positives and false positives weighted by\n `weights`. Then `update_op` increments `true_positive_at_<k>` and\n `false_positive_at_<k>` using these values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies\n num_labels=1. N >= 1 and num_labels is the number of target classes for\n the associated prediction. Commonly, N=1 and `labels` has shape\n [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values\n should be in range [0, num_classes), where num_classes is the last\n dimension of `predictions`. Values outside this range are ignored.\n predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where\n N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].\n The final dimension contains the logit values for each class. [D1, ... DN]\n must match `labels`.\n k: Integer, k for @k metric.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes], where num_classes is the last dimension of\n `predictions`. If `class_id` is outside this range, the method returns\n NAN.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. 
If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n precision: Scalar `float64` `Tensor` with the value of `true_positives`\n divided by the sum of `true_positives` and `false_positives`.\n update_op: `Operation` that increments `true_positives` and\n `false_positives` variables appropriately, and whose value matches\n `precision`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match\n `predictions`, or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.sparse_precision_at_k is not '\n 'supported when eager execution is enabled.')\n\n with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),\n (predictions, labels, weights)) as scope:\n _, top_k_idx = nn.top_k(predictions, k)\n return precision_at_top_k(\n labels=labels,\n predictions_idx=top_k_idx,\n k=k,\n class_id=class_id,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=scope)\n\n\n@tf_export('metrics.specificity_at_sensitivity')\ndef specificity_at_sensitivity(labels,\n predictions,\n sensitivity,\n weights=None,\n num_thresholds=200,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the specificity at a given sensitivity.\n\n The `specificity_at_sensitivity` function creates four local\n variables, `true_positives`, `true_negatives`, `false_positives` and\n `false_negatives` that are used to compute the specificity at the given\n sensitivity value. The threshold for the given sensitivity value is computed\n and used to evaluate the corresponding specificity.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `specificity`. `update_op` increments the `true_positives`, `true_negatives`,\n `false_positives` and `false_negatives` counts with the weight of each case\n found in the `predictions` and `labels`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n For additional information about specificity and sensitivity, see the\n following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity\n\n Args:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. 
Will be cast to `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n sensitivity: A scalar value in range `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n num_thresholds: The number of thresholds to use for matching the given\n sensitivity.\n metrics_collections: An optional list of collections that `specificity`\n should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n specificity: A scalar `Tensor` representing the specificity at the given\n `sensitivity` value.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables\n appropriately and whose value matches `specificity`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n `sensitivity` is not between 0 and 1, or if either `metrics_collections`\n or `updates_collections` are not a list or tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.specificity_at_sensitivity is not '\n 'supported when eager execution is enabled.')\n\n if sensitivity < 0 or sensitivity > 1:\n raise ValueError('`sensitivity` must be in the range [0, 1].')\n\n with variable_scope.variable_scope(name, 'specificity_at_sensitivity',\n (predictions, labels, weights)):\n kepsilon = 1e-7 # to account for floating point imprecisions\n thresholds = [\n (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)\n ]\n thresholds = [0.0 - kepsilon] + thresholds + [1.0 - kepsilon]\n\n values, update_ops = _confusion_matrix_at_thresholds(\n labels, predictions, thresholds, weights)\n\n def compute_specificity_at_sensitivity(tp, tn, fp, fn, name):\n \"\"\"Computes the specificity at the given sensitivity.\n\n Args:\n tp: True positives.\n tn: True negatives.\n fp: False positives.\n fn: False negatives.\n name: The name of the operation.\n\n Returns:\n The specificity using the aggregated values.\n \"\"\"\n sensitivities = math_ops.div(tp, tp + fn + kepsilon)\n\n # We'll need to use this trick until tf.argmax allows us to specify\n # whether we should use the first or last index in case of ties.\n min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))\n indices_at_minval = math_ops.equal(\n math_ops.abs(sensitivities - sensitivity), min_val)\n indices_at_minval = math_ops.to_int64(indices_at_minval)\n indices_at_minval = math_ops.cumsum(indices_at_minval)\n tf_index = math_ops.argmax(indices_at_minval, 0)\n tf_index = math_ops.cast(tf_index, dtypes.int32)\n\n # Now, we have the implicit threshold, so compute the specificity:\n return math_ops.div(tn[tf_index], tn[tf_index] + fp[tf_index] + kepsilon,\n name)\n\n def specificity_across_replicas(_, values):\n return compute_specificity_at_sensitivity(\n values['tp'], values['tn'], values['fp'], values['fn'], 'value')\n\n specificity = _aggregate_across_replicas(\n metrics_collections, specificity_across_replicas, values)\n\n update_op = compute_specificity_at_sensitivity(\n update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],\n 'update_op')\n if updates_collections:\n
ops.add_to_collections(updates_collections, update_op)\n\n return specificity, update_op\n",
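The streaming metrics above all share the same two-tensor contract: an idempotent value tensor plus an `update_op` that folds one batch into local-variable state. A minimal usage sketch of that contract, assuming TF 1.x graph mode (eager execution is rejected, per the `RuntimeError` docs above); the label and logit values are illustrative only:

```python
import tensorflow as tf

# Labels: [batch_size, num_labels] int64; predictions: [batch_size, num_classes]
# logits, matching the shapes documented for recall_at_k above.
labels = tf.constant([[2], [1]], dtype=tf.int64)
logits = tf.constant([[0.1, 0.3, 0.6],
                      [0.8, 0.1, 0.1]])

# Returns the idempotent metric tensor and the op that accumulates
# true_positives / false_negatives into local variables.
recall, update_op = tf.metrics.recall_at_k(labels, logits, k=2)

with tf.Session() as sess:
  # Metric state lives in local variables, so initialize those.
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)       # run once per batch to accumulate counts
  print(sess.run(recall))   # 1.0: each row's label is in its top-2
```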
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Private convenience functions for RaggedTensors.\n\nNone of these methods are exposed in the main \"ragged\" package.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import math_ops\n\n\ndef convert_to_int_tensor(tensor, name, dtype=dtypes.int32):\n \"\"\"Converts the given value to an integer Tensor.\"\"\"\n tensor = ops.convert_to_tensor(tensor, name=name, preferred_dtype=dtype)\n if tensor.dtype.is_integer:\n tensor = math_ops.cast(tensor, dtype)\n else:\n raise TypeError(\n \"%s must be an integer tensor; dtype=%s\" % (name, tensor.dtype))\n return tensor\n\n\ndef get_positive_axis(axis, ndims):\n \"\"\"Validate an `axis` parameter, and normalize it to be positive.\n\n If `ndims` is known (i.e., not `None`), then check that `axis` is in the\n range `-ndims <= axis < ndims`, and return `axis` (if `axis >= 0`) or\n `axis + ndims` (otherwise).\n If `ndims` is not known, and `axis` is positive, then return it as-is.\n If `ndims` is not known, and `axis` is negative, then report an error.\n\n Args:\n axis: An integer constant\n ndims: An integer constant, or `None`\n\n Returns:\n The normalized `axis` value.\n\n Raises:\n ValueError: If `axis` is out-of-bounds, or if `axis` is negative and\n `ndims is None`.\n \"\"\"\n if not isinstance(axis, int):\n raise TypeError(\"axis must be an int; got %s\" % type(axis).__name__)\n if ndims is not None:\n if 0 <= axis < ndims:\n return axis\n elif -ndims <= axis < 0:\n return axis + ndims\n else:\n raise ValueError(\n \"axis=%s out of bounds: expected %s<=axis<%s\" % (axis, -ndims, ndims))\n elif axis < 0:\n raise ValueError(\"axis may only be negative if ndims is statically known.\")\n return axis\n\n\ndef assert_splits_match(nested_splits_lists):\n \"\"\"Checks that the given splits lists are identical.\n\n Performs static tests to ensure that the given splits lists are identical,\n and returns a list of control dependency op tensors that check that they are\n fully identical.\n\n Args:\n nested_splits_lists: A list of nested_splits_lists, where each split_list is\n a list of `splits` tensors from a `RaggedTensor`, ordered from outermost\n ragged dimension to innermost ragged dimension.\n\n Returns:\n A list of control dependency op tensors.\n Raises:\n ValueError: If the splits are not identical.\n \"\"\"\n error_msg = \"Inputs must have identical ragged splits\"\n for splits_list in nested_splits_lists:\n if len(splits_list) != len(nested_splits_lists[0]):\n raise ValueError(error_msg)\n return [\n check_ops.assert_equal(s1, s2, message=error_msg)\n for splits_list in 
nested_splits_lists[1:]\n for (s1, s2) in zip(nested_splits_lists[0], splits_list)\n ]\n\n\n# This op is intended to exactly match the semantics of numpy.repeat, with\n# one exception: numpy.repeat has special (and somewhat non-intuitive) behavior\n# when axis is not specified. Rather than implement that special behavior, we\n# simply make `axis` be a required argument.\n#\n# External (OSS) `tf.repeat` feature request:\n# https://github.com/tensorflow/tensorflow/issues/8246\ndef repeat(data, repeats, axis, name=None):\n \"\"\"Repeats elements of `data`.\n\n Args:\n data: An `N`-dimensional tensor.\n repeats: A 1-D integer tensor specifying how many times each element in\n `axis` should be repeated. `len(repeats)` must equal `data.shape[axis]`.\n Supports broadcasting from a scalar value.\n axis: `int`. The axis along which to repeat values. Must be less than\n `max(N, 1)`.\n name: A name for the operation.\n\n Returns:\n A tensor with `max(N, 1)` dimensions. Has the same shape as `data`,\n except that dimension `axis` has size `sum(repeats)`.\n\n #### Examples:\n ```python\n >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)\n ['a', 'a', 'a', 'c', 'c']\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)\n [[1, 2], [1, 2], [3, 4], [3, 4], [3, 4]]\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)\n [[1, 1, 2, 2, 2], [3, 3, 4, 4, 4]]\n ```\n \"\"\"\n if not isinstance(axis, int):\n raise TypeError(\"axis must be an int; got %s\" % type(axis).__name__)\n\n with ops.name_scope(name, \"Repeat\", [data, repeats]):\n data = ops.convert_to_tensor(data, name=\"data\")\n repeats = convert_to_int_tensor(repeats, name=\"repeats\")\n repeats.shape.with_rank_at_most(1)\n\n # If `data` is a scalar, then upgrade it to a vector.\n data = _with_nonzero_rank(data)\n data_shape = array_ops.shape(data)\n\n # If `axis` is negative, then convert it to a positive value.\n axis = get_positive_axis(axis, data.shape.ndims)\n\n # Check data Tensor shapes.\n if repeats.shape.ndims == 1:\n data.shape.dims[axis].assert_is_compatible_with(repeats.shape[0])\n\n # If we know that `repeats` is a scalar, then we can just tile & reshape.\n if repeats.shape.ndims == 0:\n expanded = array_ops.expand_dims(data, axis + 1)\n tiled = tile_one_dimension(expanded, axis + 1, repeats)\n result_shape = array_ops.concat(\n [data_shape[:axis], [-1], data_shape[axis + 1:]], axis=0)\n return array_ops.reshape(tiled, result_shape)\n\n # Broadcast the `repeats` tensor so rank(repeats) == axis + 1.\n if repeats.shape.ndims != axis + 1:\n repeats_shape = array_ops.shape(repeats)\n repeats_ndims = array_ops.rank(repeats)\n broadcast_shape = array_ops.concat(\n [data_shape[:axis + 1 - repeats_ndims], repeats_shape], axis=0)\n repeats = array_ops.broadcast_to(repeats, broadcast_shape)\n repeats.set_shape([None] * (axis + 1))\n\n # Create a \"sequence mask\" based on `repeats`, where slices across `axis`\n # contain one `True` value for each repetition. E.g., if\n # `repeats = [3, 1, 2]`, then `mask = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]`.\n max_repeat = math_ops.maximum(0, math_ops.reduce_max(repeats))\n mask = array_ops.sequence_mask(repeats, max_repeat)\n\n # Add a new dimension around each value that needs to be repeated, and\n # then tile that new dimension to match the maximum number of repetitions.\n expanded = array_ops.expand_dims(data, axis + 1)\n tiled = tile_one_dimension(expanded, axis + 1, max_repeat)\n\n # Use `boolean_mask` to discard the extra repeated values. 
This also\n # flattens all dimensions up through `axis`.\n masked = array_ops.boolean_mask(tiled, mask)\n\n # Reshape the output tensor to add the outer dimensions back.\n if axis == 0:\n result = masked\n else:\n result_shape = array_ops.concat(\n [data_shape[:axis], [-1], data_shape[axis + 1:]], axis=0)\n result = array_ops.reshape(masked, result_shape)\n\n # Preserve shape information.\n if data.shape.ndims is not None:\n new_axis_size = 0 if repeats.shape[0] == 0 else None\n result.set_shape(data.shape[:axis].concatenate(\n [new_axis_size]).concatenate(data.shape[axis + 1:]))\n\n return result\n\n\ndef tile_one_dimension(data, axis, multiple):\n \"\"\"Tiles a single dimension of a tensor.\"\"\"\n # Assumes axis is a nonnegative int.\n if data.shape.ndims is not None:\n multiples = [1] * data.shape.ndims\n multiples[axis] = multiple\n else:\n ones = array_ops.ones(array_ops.rank(data), dtypes.int32)\n multiples = array_ops.concat([ones[:axis], [multiple], ones[axis + 1:]],\n axis=0)\n return array_ops.tile(data, multiples)\n\n\ndef _with_nonzero_rank(data):\n \"\"\"If `data` is scalar, then add a dimension; otherwise return as-is.\"\"\"\n if data.shape.ndims is not None:\n if data.shape.ndims == 0:\n return array_ops.stack([data])\n else:\n return data\n else:\n data_shape = array_ops.shape(data)\n data_ndims = array_ops.rank(data)\n return array_ops.reshape(\n data,\n array_ops.concat([[1], data_shape], axis=0)[-data_ndims:])\n",
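The comment above `repeat` pins its semantics to `numpy.repeat` with `axis` made mandatory, so the documented examples can be sanity-checked against NumPy itself. A small reference sketch, using only `numpy` so it carries no TensorFlow-version assumptions:

```python
import numpy as np

data = np.array([[1, 2], [3, 4]])

# Per-row repeat counts along axis 0: row 0 twice, row 1 three times.
print(np.repeat(data, [2, 3], axis=0))
# -> [[1 2] [1 2] [3 4] [3 4] [3 4]], matching the docstring example.

# The same counts along axis 1 repeat columns instead.
print(np.repeat(data, [2, 3], axis=1))
# -> [[1 1 2 2 2] [3 3 4 4 4]]

# A scalar `repeats` broadcasts to every element on the axis.
print(np.repeat(data, 2, axis=0))
```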
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for control_flow module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.autograph.converters import control_flow\nfrom tensorflow.python.autograph.core import converter_testing\nfrom tensorflow.python.autograph.pyct import transformer\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.platform import test\n\n\nclass ControlFlowTest(converter_testing.TestCase):\n\n def assertTransformedResult(self, test_fn, inputs, expected):\n if not isinstance(inputs, tuple):\n inputs = (inputs,)\n with self.converted(test_fn, control_flow, {},\n constant_op.constant) as result:\n with self.cached_session() as sess:\n self.assertEqual(sess.run(result.test_fn(*inputs)), expected)\n\n def test_while_basic(self):\n\n def test_fn(n):\n i = 0\n s = 0\n while i < n:\n s += i\n i += 1\n return s, i, n\n\n self.assertTransformedResult(test_fn, constant_op.constant(5), (10, 5, 5))\n\n def test_while_nested(self):\n\n def test_fn(n):\n i = 0\n j = 0\n s = 0\n while i < n:\n while j < i:\n j += 3\n u = i + j # 'u' is not defined within the inner loop\n s += u\n i += 1\n j = 0\n return s, i, j, n\n\n self.assertTransformedResult(test_fn, constant_op.constant(5),\n (25, 5, 0, 5))\n\n def test_while_single_output(self):\n\n def test_fn(n):\n while n > 0:\n n -= 1\n return n\n\n self.assertTransformedResult(test_fn, constant_op.constant(5), 0)\n\n def test_while_variable_defined_in_body(self):\n def bad_while_loop(n):\n while n > 0:\n n -= 1\n s = n\n return s\n\n node, ctx = self.prepare(bad_while_loop, {})\n with self.assertRaises(NameError):\n control_flow.transform(node, ctx)\n\n def test_if_basic(self):\n\n def test_fn(n):\n a = 0\n b = 0\n if n > 0:\n a = -n\n else:\n b = 2 * n\n return a, b\n\n self.assertTransformedResult(test_fn, constant_op.constant(1), (-1, 0))\n self.assertTransformedResult(test_fn, constant_op.constant(-1), (0, -2))\n\n def test_if_complex_outputs(self):\n\n class TestClass(object):\n\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n def test_fn(n, obj):\n obj.a = 0\n obj.b = 0\n if n > 0:\n obj.a = -n\n else:\n obj.b = 2 * n\n return obj\n\n with self.converted(test_fn, control_flow, {}) as result:\n with self.cached_session() as sess:\n res_obj = result.test_fn(constant_op.constant(1), TestClass(0, 0))\n self.assertEqual(sess.run((res_obj.a, res_obj.b)), (-1, 0))\n res_obj = result.test_fn(constant_op.constant(-1), TestClass(0, 0))\n self.assertEqual(sess.run((res_obj.a, res_obj.b)), (0, -2))\n\n def test_if_single_output(self):\n\n def test_fn(n):\n if n > 0:\n n = -n\n return n\n\n self.assertTransformedResult(test_fn, constant_op.constant(1), -1)\n\n def test_if_semi(self):\n\n def test_fn(n):\n if n > 
0:\n n = 3\n return n\n\n self.assertTransformedResult(test_fn, constant_op.constant(2), 3)\n self.assertTransformedResult(test_fn, constant_op.constant(-3), -3)\n\n def test_if_local_var(self):\n\n def test_fn(n):\n if n > 0:\n b = 4\n n = b + 1\n return n\n\n self.assertTransformedResult(test_fn, constant_op.constant(1), 5)\n self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)\n\n def test_if_no_outputs(self):\n\n def test_fn(n):\n if n > 0:\n b = 4 # pylint:disable=unused-variable\n return n\n\n # Without side effect guards, the if statement will stage a cond,\n # but that will be pruned at execution.\n self.assertTransformedResult(test_fn, constant_op.constant(1), 1)\n self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)\n\n def test_if_imbalanced_outputs(self):\n\n def test_fn(n):\n if n > 0:\n b = 4\n return b\n\n node, ctx = self.prepare(test_fn, {})\n with self.assertRaises(transformer.AutographParseError):\n control_flow.transform(node, ctx)\n\n def test_simple_for(self):\n\n def test_fn(l):\n s1 = 0\n s2 = 0\n for e in l:\n s1 += e\n s2 += e * e\n return s1, s2\n\n self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), (4, 10))\n empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)\n self.assertTransformedResult(test_fn, empty_vector, (0, 0))\n\n def test_for_single_output(self):\n\n def test_fn(l):\n s = 0\n for e in l:\n s += e\n return s\n\n self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), 4)\n empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)\n self.assertTransformedResult(test_fn, empty_vector, 0)\n\n def test_for_iterated_expression(self):\n\n eval_count = [0]\n\n def count_evals(x):\n eval_count[0] += 1\n return x\n\n def test_fn(n):\n s = 0\n for e in count_evals(range(n)):\n s += e\n return s\n\n ns = {'count_evals': count_evals}\n node, ctx = self.prepare(test_fn, ns)\n node = control_flow.transform(node, ctx)\n\n with self.compiled(node, ns) as result:\n self.assertEqual(result.test_fn(5), 10)\n self.assertEqual(eval_count[0], 1)\n\n def test_for_variable_defined_in_body(self):\n def bad_for_loop(n):\n for i in range(n):\n s = i\n return s\n\n node, ctx = self.prepare(bad_for_loop, {})\n with self.assertRaises(NameError):\n control_flow.transform(node, ctx)\n\n def test_for_tuple_unpacking(self):\n def test_fn(x_list):\n z = tf.constant(0) # pylint:disable=undefined-variable\n for i, x in enumerate(x_list):\n z = z + x + i\n return z\n\n self.assertTransformedResult(test_fn, [3, 3], 7)\nif __name__ == '__main__':\n test.main()\n",
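For orientation, the Python loops these tests feed through the converter end up as graph-level control flow. Below is a hand-written analogue of `test_while_basic`, sketched with `tf.while_loop` under TF 1.x graph-mode assumptions; this is not the converter's literal output, just the op the transformed loop corresponds to:

```python
import tensorflow as tf

def sum_to_n(n):
  # Hand-written counterpart of `while i < n: s += i; i += 1`.
  i = tf.constant(0)
  s = tf.constant(0)
  s, i = tf.while_loop(
      cond=lambda s, i: i < n,          # loop test on tensor values
      body=lambda s, i: (s + i, i + 1),
      loop_vars=(s, i))                 # loop variables passed explicitly
  return s, i, n

with tf.Session() as sess:
  # Matches the (10, 5, 5) expectation in test_while_basic.
  print(sess.run(sum_to_n(tf.constant(5))))
```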
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport os\n\nimport numpy\nimport six\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras.engine import training\nfrom tensorflow.python.keras.layers import core\nfrom tensorflow.python.keras.layers import normalization\nfrom tensorflow.python.layers import core as non_keras_core\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training.checkpointable import data_structures\nfrom tensorflow.python.training.checkpointable import tracking\nfrom tensorflow.python.training.checkpointable import util\n\n\nclass HasList(training.Model):\n\n def __init__(self):\n super(HasList, self).__init__()\n self.layer_list = data_structures.List([core.Dense(3)])\n self.layer_list.append(core.Dense(4))\n self.layer_list.extend(\n [core.Dense(5),\n core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])\n self.layer_list += [\n core.Dense(7, bias_regularizer=math_ops.reduce_sum),\n core.Dense(8)\n ]\n self.layer_list += (\n data_structures.List([core.Dense(9)]) + data_structures.List(\n [core.Dense(10)]))\n self.layer_list.extend(\n data_structures.List(\n list(sequence=[core.Dense(11)]) + [core.Dense(12)]))\n self.layers_with_updates = data_structures.List(\n sequence=(normalization.BatchNormalization(),))\n\n def call(self, x):\n aggregation = 0.\n for l in self.layer_list:\n x = l(x)\n aggregation += math_ops.reduce_sum(x)\n bn, = self.layers_with_updates\n return bn(x) / aggregation\n\n\nclass ListTests(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes\n def testTracking(self):\n model = HasList()\n output = model(array_ops.ones([32, 2]))\n self.assertAllEqual([32, 12], output.shape)\n self.assertEqual(11, len(model.layers))\n self.assertEqual(10, len(model.layer_list.layers))\n six.assertCountEqual(\n self,\n model.layers,\n model.layer_list.layers + model.layers_with_updates)\n for index in range(10):\n self.assertEqual(3 + index, model.layer_list.layers[index].units)\n self.assertEqual(2, len(model._checkpoint_dependencies))\n self.assertIs(model.layer_list, model._checkpoint_dependencies[0].ref)\n self.assertIs(model.layers_with_updates,\n model._checkpoint_dependencies[1].ref)\n self.assertEqual(\n 10, len(model._checkpoint_dependencies[0].ref._checkpoint_dependencies))\n self.evaluate([v.initializer for v in model.variables])\n self.evaluate(model.variables[0].assign([[1., 2., 3.], [4., 5., 6.]]))\n save_path = os.path.join(self.get_temp_dir(), 
\"ckpt\")\n model.save_weights(save_path)\n self.evaluate(model.variables[0].assign(array_ops.zeros([2, 3])))\n model.load_weights(save_path)\n self.assertAllEqual([[1., 2., 3.], [4., 5., 6.]],\n self.evaluate(model.variables[0]))\n v = variables.Variable(1.)\n model.var_list = [v]\n self.assertIn(v, model.variables)\n self.assertIn(v, model.trainable_variables)\n self.assertNotIn(v, model.non_trainable_variables)\n\n def testUpdatesForwarded(self):\n with context.graph_mode():\n model = HasList()\n model_input = array_ops.ones([32, 2])\n model(model_input)\n self.assertGreater(len(model.layers_with_updates[0].updates), 0)\n self.assertEqual(set(model.layers_with_updates[0].updates),\n set(model.updates))\n\n with context.eager_mode():\n model = HasList()\n model_input = array_ops.ones([32, 2])\n model(model_input)\n self.assertEqual(0, len(model.updates))\n\n @test_util.run_in_graph_and_eager_modes\n def testLossesForwarded(self):\n model = HasList()\n model_input = array_ops.ones([32, 2])\n model(model_input)\n self.assertEqual(2, len(model.losses))\n\n def testModelContainersCompareEqual(self):\n class HasEqualContainers(training.Model):\n\n def __init__(self):\n super(HasEqualContainers, self).__init__()\n self.l1 = []\n self.l2 = []\n\n model = HasEqualContainers()\n first_layer = HasEqualContainers()\n model.l1.append(first_layer)\n second_layer = HasEqualContainers()\n model.l2.append(second_layer)\n self.assertEqual([first_layer, second_layer], model.layers)\n\n def testNotCheckpointable(self):\n class NotCheckpointable(object):\n pass\n\n with self.assertRaises(ValueError):\n data_structures.List([NotCheckpointable()])\n\n def testCallNotImplemented(self):\n with self.assertRaisesRegexp(TypeError, \"not callable\"):\n data_structures.List()(1.)\n\n def testNoPop(self):\n with self.assertRaises(AttributeError):\n data_structures.List().pop()\n\n @test_util.run_in_graph_and_eager_modes\n def testTensorConversion(self):\n\n class ListToTensor(training.Model):\n\n def __init__(self):\n super(ListToTensor, self).__init__()\n self.l = [1., 2., 3.]\n\n self.assertAllEqual(\n [1., 2., 3.],\n self.evaluate(constant_op.constant(ListToTensor().l)))\n\n self.assertAllEqual(\n [1., 2., 3.],\n self.evaluate(array_ops.pack(ListToTensor().l)))\n\n def testNesting(self):\n with context.graph_mode():\n inner = data_structures.List()\n outer = data_structures.List([inner])\n inner.append(non_keras_core.Dense(1))\n inner[0](array_ops.ones([2, 3]))\n self.assertEqual(2, len(outer.variables))\n self.assertIsInstance(\n outer.variables[0],\n resource_variable_ops.ResourceVariable)\n\n def testNonLayerVariables(self):\n v = resource_variable_ops.ResourceVariable([1.])\n l = data_structures.List([v])\n self.assertTrue(l.trainable)\n self.assertEqual([], l.layers)\n self.assertEqual([v], l.variables)\n self.assertEqual([v], l.trainable_weights)\n self.assertEqual([], l.non_trainable_variables)\n l.trainable = False\n self.assertEqual([v], l.variables)\n self.assertEqual([], l.trainable_variables)\n self.assertEqual([v], l.non_trainable_variables)\n l.trainable = True\n v2 = resource_variable_ops.ResourceVariable(1., trainable=False)\n l.append(v2)\n self.assertEqual([v, v2], l.weights)\n self.assertEqual([v], l.trainable_weights)\n self.assertEqual([v2], l.non_trainable_weights)\n\n def testListWrapperBasic(self):\n # _ListWrapper, unlike List, compares like the built-in list type (since it\n # is used to automatically replace lists).\n a = tracking.Checkpointable()\n b = tracking.Checkpointable()\n 
self.assertEqual([a, a],\n [a, a])\n self.assertEqual(data_structures._ListWrapper([a, a]),\n data_structures._ListWrapper([a, a]))\n self.assertEqual([a, a],\n data_structures._ListWrapper([a, a]))\n self.assertEqual(data_structures._ListWrapper([a, a]),\n [a, a])\n self.assertNotEqual([a, a],\n [b, a])\n self.assertNotEqual(data_structures._ListWrapper([a, a]),\n data_structures._ListWrapper([b, a]))\n self.assertNotEqual([a, a],\n data_structures._ListWrapper([b, a]))\n self.assertLess([a], [a, b])\n self.assertLess(data_structures._ListWrapper([a]),\n data_structures._ListWrapper([a, b]))\n self.assertLessEqual([a], [a, b])\n self.assertLessEqual(data_structures._ListWrapper([a]),\n data_structures._ListWrapper([a, b]))\n self.assertGreater([a, b], [a])\n self.assertGreater(data_structures._ListWrapper([a, b]),\n data_structures._ListWrapper([a]))\n self.assertGreaterEqual([a, b], [a])\n self.assertGreaterEqual(data_structures._ListWrapper([a, b]),\n data_structures._ListWrapper([a]))\n self.assertEqual([a], data_structures._ListWrapper([a]))\n self.assertEqual([a], list(data_structures.List([a])))\n self.assertEqual([a, a], data_structures._ListWrapper([a]) + [a])\n self.assertEqual([a, a], [a] + data_structures._ListWrapper([a]))\n self.assertIsInstance(data_structures._ListWrapper([a]), list)\n\n def testWrapperChangesList(self):\n l = []\n l_wrapper = data_structures._ListWrapper(l)\n l_wrapper.append(1)\n self.assertEqual([1], l)\n\n def testListChangesWrapper(self):\n l = []\n l_wrapper = data_structures._ListWrapper(l)\n l.append(1)\n self.assertEqual([1], l_wrapper)\n\n def testHashing(self):\n has_sequences = set([data_structures.List(),\n data_structures.List()])\n self.assertEqual(2, len(has_sequences))\n self.assertNotIn(data_structures.List(), has_sequences)\n with self.assertRaises(TypeError):\n has_sequences.add(data_structures._ListWrapper([]))\n\n\nclass HasMapping(training.Model):\n\n def __init__(self):\n super(HasMapping, self).__init__()\n self.layer_dict = data_structures.Mapping(output=core.Dense(7))\n self.layer_dict[\"norm\"] = data_structures.List()\n self.layer_dict[\"dense\"] = data_structures.List()\n self.layer_dict[\"dense\"].extend(\n [core.Dense(5),\n core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])\n self.layer_dict[\"norm\"].append(\n normalization.BatchNormalization())\n self.layer_dict[\"norm\"].append(\n normalization.BatchNormalization())\n\n def call(self, x):\n aggregation = 0.\n for norm, dense in zip(self.layer_dict[\"norm\"], self.layer_dict[\"dense\"]):\n x = norm(dense(x))\n aggregation += math_ops.reduce_sum(x)\n return self.layer_dict[\"output\"](x) / aggregation\n\n\nclass MappingTests(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes\n def testTracking(self):\n model = HasMapping()\n output = model(array_ops.ones([32, 2]))\n self.assertAllEqual([32, 7], output.shape)\n self.assertEqual(5, len(model.layers))\n six.assertCountEqual(self, model.layers, model.layer_dict.layers)\n self.assertEqual(1, len(model._checkpoint_dependencies))\n self.assertIs(model.layer_dict, model._checkpoint_dependencies[0].ref)\n self.evaluate([v.initializer for v in model.variables])\n test_var = model.layer_dict[\"output\"].kernel\n self.evaluate(test_var.assign(array_ops.ones([6, 7])))\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n model.save_weights(save_path)\n self.evaluate(test_var.assign(array_ops.zeros([6, 7])))\n model.load_weights(save_path)\n self.assertAllEqual(numpy.ones([6, 7]),\n self.evaluate(test_var))\n\n def 
testNoOverwrite(self):\n mapping = data_structures.Mapping()\n original = data_structures.List()\n mapping[\"a\"] = original\n with self.assertRaises(ValueError):\n mapping[\"a\"] = data_structures.List()\n self.assertIs(original, mapping[\"a\"])\n with self.assertRaises(AttributeError):\n del mapping[\"a\"]\n mapping.update(b=data_structures.Mapping())\n with self.assertRaises(ValueError):\n mapping.update({\"b\": data_structures.Mapping()})\n\n def testNonStringKeys(self):\n mapping = data_structures.Mapping()\n with self.assertRaises(TypeError):\n mapping[1] = data_structures.List()\n\n def testHashing(self):\n has_mappings = set([data_structures.Mapping(),\n data_structures.Mapping()])\n self.assertEqual(2, len(has_mappings))\n self.assertNotIn(data_structures.Mapping(), has_mappings)\n # In contrast to Mapping, dict wrappers are not hashable\n a = tracking.Checkpointable()\n a.d = {}\n self.assertEqual({}, a.d)\n self.assertFalse({} != a.d) # pylint: disable=g-explicit-bool-comparison\n self.assertNotEqual({1: 2}, a.d)\n with self.assertRaisesRegexp(TypeError, \"unhashable\"):\n set([a.d])\n\n def testDictWrapperBadKeys(self):\n a = tracking.Checkpointable()\n a.d = {}\n a.d[1] = data_structures.List()\n model = training.Model()\n model.sub = a\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n with self.assertRaisesRegexp(ValueError, \"non-string key\"):\n model.save_weights(save_path)\n\n def testDictWrapperNoDependency(self):\n a = tracking.Checkpointable()\n a.d = data_structures.NoDependency({})\n a.d[1] = [3]\n self.assertEqual([a], util.list_objects(a))\n model = training.Model()\n model.sub = a\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n model.save_weights(save_path)\n model.load_weights(save_path)\n\n def testNonStringKeyNotCheckpointableValue(self):\n a = tracking.Checkpointable()\n a.d = {}\n a.d[\"a\"] = [3]\n a.d[1] = data_structures.NoDependency([3])\n self.assertEqual([a, a.d, a.d[\"a\"]], util.list_objects(a))\n model = training.Model()\n model.sub = a\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n model.save_weights(save_path)\n model.load_weights(save_path)\n\n def testNonAppendNotCheckpointable(self):\n # Non-append mutations (deleting or overwriting values) are OK when the\n # values aren't tracked.\n a = tracking.Checkpointable()\n a.d = {}\n a.d[\"a\"] = [3]\n a.d[1] = 3\n a.d[1] = 2\n self.assertEqual(2, a.d[1])\n del a.d[1]\n a.d[2] = data_structures.NoDependency(tracking.Checkpointable())\n second = tracking.Checkpointable()\n a.d[2] = data_structures.NoDependency(second)\n self.assertIs(second, a.d[2])\n self.assertEqual([a, a.d, a.d[\"a\"]], util.list_objects(a))\n model = training.Model()\n model.sub = a\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n model.save_weights(save_path)\n model.load_weights(save_path)\n\n def testDelNoSave(self):\n model = training.Model()\n model.d = {}\n model.d[\"a\"] = []\n del model.d[\"a\"]\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n with self.assertRaisesRegexp(ValueError, \"overwritten or deleted\"):\n model.save_weights(save_path)\n\n def testPopNoSave(self):\n model = training.Model()\n model.d = {}\n model.d[\"a\"] = []\n model.d.pop(\"a\")\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n with self.assertRaisesRegexp(ValueError, \"overwritten or deleted\"):\n model.save_weights(save_path)\n\n def testExternalModificationNoSave(self):\n model = training.Model()\n external_reference = {}\n model.d = external_reference\n external_reference[\"a\"] 
= []\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n with self.assertRaisesRegexp(ValueError, \"modified outside the wrapper\"):\n model.save_weights(save_path)\n\n def testOverwriteNoSave(self):\n model = training.Model()\n model.d = {}\n model.d[\"a\"] = {}\n model.d[\"a\"] = {}\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n with self.assertRaisesRegexp(ValueError, \"overwritten or deleted\"):\n model.save_weights(save_path)\n\n def testIter(self):\n model = training.Model()\n model.d = {1: 3}\n model.d[1] = 3\n self.assertEqual([1], list(model.d))\n new_dict = {}\n # This update() is super tricky. If the dict wrapper subclasses dict,\n # CPython will access its storage directly instead of calling any\n # methods/properties on the object. So the options are either not to\n # subclass dict (in which case update will call normal iter methods, but the\n # object won't pass isinstance checks) or to subclass dict and keep that\n # storage updated (no shadowing all its methods like _ListWrapper).\n new_dict.update(model.d)\n self.assertEqual({1: 3}, new_dict)\n\n def testListShallowCopy(self):\n root = tracking.Checkpointable()\n orig_list = [[1.]]\n root.a = orig_list\n copied = copy.copy(root.a)\n self.assertAllEqual([[1.]], copied)\n self.assertIsNot(root.a, copied)\n self.assertIs(root.a[0], copied[0])\n\n # Dirtiness should be inherited\n util.list_objects(root.a)\n orig_list.append(1.)\n with self.assertRaises(ValueError):\n util.list_objects(root.a)\n with self.assertRaises(ValueError):\n util.list_objects(copy.copy(root.a))\n\n def testListDeepCopy(self):\n root = tracking.Checkpointable()\n orig_list = [[1.]]\n root.a = orig_list\n copied = copy.deepcopy(root.a)\n self.assertAllEqual([[1.]], copied)\n self.assertIsNot(root.a, copied)\n self.assertIsNot(root.a[0], copied[0])\n\n # Dirtiness should be inherited\n util.list_objects(root.a)\n orig_list.append(1.)\n with self.assertRaises(ValueError):\n util.list_objects(root.a)\n with self.assertRaises(ValueError):\n util.list_objects(copy.deepcopy(root.a))\n\n def testDictShallowCopy(self):\n root = tracking.Checkpointable()\n orig_dict = {\"a\": [1.]}\n root.a = orig_dict\n copied = copy.copy(root.a)\n self.assertAllEqual([1.], copied[\"a\"])\n self.assertIsNot(root.a, copied)\n self.assertIs(root.a[\"a\"], copied[\"a\"])\n\n # Dirtiness should be inherited\n util.list_objects(root.a)\n orig_dict[\"b\"] = []\n with self.assertRaises(ValueError):\n util.list_objects(root.a)\n with self.assertRaises(ValueError):\n util.list_objects(copy.copy(root.a))\n\n def testDictDeepCopy(self):\n root = tracking.Checkpointable()\n orig_dict = {\"a\": [1.]}\n root.a = orig_dict\n copied = copy.deepcopy(root.a)\n self.assertAllEqual([1.], copied[\"a\"])\n self.assertIsNot(root.a, copied)\n self.assertIsNot(root.a[\"a\"], copied[\"a\"])\n\n # Dirtiness should be inherited\n util.list_objects(root.a)\n orig_dict[\"b\"] = []\n with self.assertRaises(ValueError):\n util.list_objects(root.a)\n with self.assertRaises(ValueError):\n util.list_objects(copy.deepcopy(root.a))\n\n def testShallowCopyCheckpointable(self):\n original = tracking.Checkpointable()\n original_sub = tracking.Checkpointable()\n original.a = [[1.]]\n original.b = {\"a\": original_sub}\n shallow_copied = copy.copy(original)\n self.assertIs(original_sub, shallow_copied.b[\"a\"])\n self.assertIsNot(original, shallow_copied)\n self.assertEqual([[1.]], shallow_copied.a)\n shallow_deps = util.list_objects(shallow_copied)\n self.assertIn(shallow_copied.a, shallow_deps)\n 
self.assertIn(shallow_copied.b, shallow_deps)\n self.assertIn(shallow_copied.b[\"a\"], shallow_deps)\n\n def testDeepCopyCheckpointable(self):\n original = tracking.Checkpointable()\n original_sub = tracking.Checkpointable()\n original.a = [[1.]]\n original.b = {\"a\": original_sub}\n deep_copied = copy.deepcopy(original)\n self.assertIsNot(original, deep_copied)\n self.assertIsNot(original_sub, deep_copied.b[\"a\"])\n self.assertEqual([[1.]], deep_copied.a)\n self.assertIsInstance(deep_copied.b[\"a\"], tracking.Checkpointable)\n deps = util.list_objects(deep_copied)\n self.assertIn(deep_copied.a, deps)\n self.assertIn(deep_copied.b, deps)\n self.assertIn(deep_copied.b[\"a\"], deps)\n self.assertNotIn(original_sub, deps)\n\n def testConstructableFromSequence(self):\n result = data_structures._DictWrapper([(1, 2), (3, 4)])\n self.assertIsInstance(result, dict)\n self.assertEqual({1: 2, 3: 4}, result)\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.python.lib.io.file_io.stat",
"tensorflow.python.platform.tf_logging.error",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.lib.io.file_io.file_exists",
"tensorflow.python.lib.io.file_io.delete_file",
"tensorflow.python.lib.io.file_io.read_file_to_string",
"tensorflow.python.framework.ops.get_default_session",
"tensorflow.python.training.training_util.global_step",
"tensorflow.python.lib.io.file_io.get_matching_files",
"tensorflow.python.training.checkpoint_state_pb2.CheckpointState",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.util.deprecation.deprecated"
],
[
"tensorflow.core.framework.types_pb2.DataType.values",
"numpy.issubdtype",
"numpy.dtype",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.pywrap_tensorflow.TF_bfloat16_type"
],
[
"numpy.diag",
"tensorflow.python.ops.array_ops.matrix_set_diag",
"numpy.random.seed",
"tensorflow.python.ops.gradients_impl.gradients",
"numpy.arange",
"tensorflow.python.ops.array_ops.diag",
"tensorflow.python.platform.tf_logging.info",
"numpy.ones",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.array_ops.matrix_diag",
"tensorflow.python.platform.test.main",
"numpy.random.rand",
"tensorflow.python.ops.array_ops.diag_part",
"numpy.array",
"numpy.empty",
"tensorflow.python.ops.array_ops.matrix_diag_part"
],
[
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.sparse_placeholder",
"numpy.random.randn",
"tensorflow.python.framework.ops.device",
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_sparse_tensor_slices",
"tensorflow.python.platform.test.main",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"numpy.median",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.client.session.Session",
"tensorflow.python.data.util.nest.flatten",
"numpy.array",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.framework.sparse_tensor.is_sparse",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"numpy.empty"
],
[
"tensorflow.python.data.experimental.ops.sleep.sleep",
"tensorflow.python.platform.test.main",
"tensorflow.python.data.ops.dataset_ops.Dataset.range"
],
[
"tensorflow.python.ops.array_ops.split",
"tensorflow.python.framework.device.DeviceSpec.from_string",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.gradients_impl._AggregateIndexedSlicesGradients",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.distribute.all_reduce.build_nccl_then_recursive_hd",
"tensorflow.python.ops.gradients_impl._HandleNestedIndexedSlices",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.distribute.all_reduce.build_shuffle_then_shuffle",
"tensorflow.python.distribute.all_reduce.build_ring_all_reduce",
"tensorflow.python.ops.array_ops.is_finite",
"tensorflow.python.distribute.all_reduce.build_nccl_then_ring",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.distribute.all_reduce.build_shuffle_all_reduce",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.ops.nccl_ops.all_sum",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.collective_ops.all_reduce",
"tensorflow.python.distribute.all_reduce.build_nccl_then_shuffle"
],
[
"tensorflow.python.ops.ragged.from_row_splits",
"tensorflow.python.ops.ragged.constant",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.ops.ragged.map_inner_values",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.ops.ragged.from_row_splits",
"tensorflow.python.ops.ragged.batch_gather",
"tensorflow.python.ops.ragged.constant",
"tensorflow.python.ops.ragged.constant_value",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.platform.googletest.main"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.signal.mfcc_ops.mfccs_from_log_mel_spectrograms",
"tensorflow.python.ops.spectral_ops_test_util.fft_kernel_label_map",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.random_ops.random_normal"
],
[
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.ops.candidate_sampling_ops.log_uniform_candidate_sampler",
"tensorflow.python.ops.candidate_sampling_ops.compute_accidental_hits",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.candidate_sampling_ops.all_candidate_sampler",
"tensorflow.python.platform.test.main",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.ops.ragged.where",
"tensorflow.python.ops.ragged.constant_value",
"tensorflow.python.platform.googletest.main"
],
[
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.ops.math_ops.subtract",
"tensorflow.python.training.distribution_strategy_context.get_replica_context",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.ops.math_ops.to_double",
"tensorflow.python.ops.math_ops.greater",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.python.ops.confusion_matrix.confusion_matrix",
"tensorflow.python.ops.math_ops.to_float",
"tensorflow.python.ops.math_ops.not_equal",
"tensorflow.python.ops.array_ops.diag_part",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.state_ops.scatter_add",
"tensorflow.python.ops.math_ops.logical_and",
"tensorflow.python.ops.math_ops.logical_not",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.math_ops.to_int64",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.array_ops.fill",
"tensorflow.python.ops.sparse_ops.sparse_concat",
"tensorflow.python.ops.math_ops.argmax",
"tensorflow.python.ops.math_ops.less",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.sets.set_size",
"tensorflow.python.ops.math_ops.div_no_nan",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.sets.set_difference",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.framework.sparse_tensor.convert_to_tensor_or_sparse_tensor",
"tensorflow.python.ops.sparse_ops.sparse_reshape",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.math_ops.div",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.ops.nn.top_k",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.check_ops.assert_type",
"tensorflow.python.ops.weights_broadcast_ops.assert_broadcastable",
"tensorflow.python.framework.ops.add_to_collections",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.confusion_matrix.remove_squeezable_dimensions",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.math_ops.cumsum",
"tensorflow.python.ops.sets.set_intersection",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.math_ops.reduce_sum"
],
[
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.array_ops.broadcast_to",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.math_ops.reduce_max",
"tensorflow.python.ops.array_ops.sequence_mask",
"tensorflow.python.ops.check_ops.assert_equal",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.boolean_mask",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.math_ops.cast"
],
[
"tensorflow.python.autograph.converters.control_flow.transform",
"tensorflow.python.platform.test.main",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.keras.layers.normalization.BatchNormalization",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.layers.core.Dense",
"tensorflow.python.training.checkpointable.data_structures.Mapping",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.training.checkpointable.util.list_objects",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.eager.test.main",
"tensorflow.python.keras.layers.core.Dense",
"tensorflow.python.training.checkpointable.data_structures._ListWrapper",
"tensorflow.python.training.checkpointable.data_structures._DictWrapper",
"tensorflow.python.training.checkpointable.tracking.Checkpointable",
"tensorflow.python.training.checkpointable.data_structures.List",
"numpy.ones",
"tensorflow.python.training.checkpointable.data_structures.NoDependency",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.keras.engine.training.Model"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"1.7",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"1.4",
"2.6",
"1.13",
"2.3",
"2.4",
"2.2",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"2.3",
"2.2",
"2.4"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.12",
"2.6",
"2.7",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.10",
"1.12"
]
}
] |
mantuoluozk/MFC | [
"e296d7a8e345bc2ca404b5f0fb7f5048f9c5f0d3"
] | [
"code/test_util.py"
] | [
"import h5py\nimport math\nimport nibabel as nib\nimport numpy as np\nfrom medpy import metric\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom skimage.measure import label\n\n\ndef getLargestCC(segmentation):\n labels = label(segmentation)\n assert(labels.max() != 0) # assume at least 1 CC\n largestCC = labels == np.argmax(np.bincount(labels.flat)[1:])+1\n return largestCC\n\n\ndef test_all_case(net, image_list, num_classes, patch_size=(112, 112, 80), stride_xy=18, stride_z=4, save_result=True, test_save_path=None, preproc_fn=None, metric_detail=0, nms=0):\n total_metric = 0.0\n loader = tqdm(image_list) if not metric_detail else image_list\n ith = 0\n for image_path in loader:\n # id = image_path.split('/')[-2]\n h5f = h5py.File(image_path, 'r')\n image = h5f['image'][:]\n label = h5f['label'][:]\n if preproc_fn is not None:\n image = preproc_fn(image)\n prediction, score_map = test_single_case(\n net, image, stride_xy, stride_z, patch_size, num_classes=num_classes)\n if nms:\n prediction = getLargestCC(prediction)\n\n if np.sum(prediction) == 0:\n single_metric = (0, 0, 0, 0)\n else:\n single_metric = calculate_metric_percase(prediction, label[:])\n if metric_detail:\n print('%02d,\\t%.5f, %.5f, %.5f, %.5f' % (\n ith, single_metric[0], single_metric[1], single_metric[2], single_metric[3]))\n\n total_metric += np.asarray(single_metric)\n\n if save_result:\n nib.save(nib.Nifti1Image(prediction.astype(np.float32),\n np.eye(4)), test_save_path + \"%02d_pred.nii.gz\" % ith)\n nib.save(nib.Nifti1Image(image[:].astype(np.float32), np.eye(\n 4)), test_save_path + \"%02d_img.nii.gz\" % ith)\n nib.save(nib.Nifti1Image(label[:].astype(np.float32), np.eye(\n 4)), test_save_path + \"%02d_gt.nii.gz\" % ith)\n ith += 1\n\n avg_metric = total_metric / len(image_list)\n print('average metric is {}'.format(avg_metric))\n\n return avg_metric\n\n\ndef test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1):\n w, h, d = image.shape\n\n # if the size of image is less than patch_size, then padding it\n add_pad = False\n if w < patch_size[0]:\n w_pad = patch_size[0]-w\n add_pad = True\n else:\n w_pad = 0\n if h < patch_size[1]:\n h_pad = patch_size[1]-h\n add_pad = True\n else:\n h_pad = 0\n if d < patch_size[2]:\n d_pad = patch_size[2]-d\n add_pad = True\n else:\n d_pad = 0\n wl_pad, wr_pad = w_pad//2, w_pad-w_pad//2\n hl_pad, hr_pad = h_pad//2, h_pad-h_pad//2\n dl_pad, dr_pad = d_pad//2, d_pad-d_pad//2\n if add_pad:\n image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad),\n (dl_pad, dr_pad)], mode='constant', constant_values=0)\n ww, hh, dd = image.shape\n\n sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1\n sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1\n sz = math.ceil((dd - patch_size[2]) / stride_z) + 1\n # print(\"{}, {}, {}\".format(sx, sy, sz))\n score_map = np.zeros((num_classes, ) + image.shape).astype(np.float32)\n cnt = np.zeros(image.shape).astype(np.float32)\n\n for x in range(0, sx):\n xs = min(stride_xy*x, ww-patch_size[0])\n for y in range(0, sy):\n ys = min(stride_xy * y, hh-patch_size[1])\n for z in range(0, sz):\n zs = min(stride_z * z, dd-patch_size[2])\n test_patch = image[xs:xs+patch_size[0],\n ys:ys+patch_size[1], zs:zs+patch_size[2]]\n test_patch = np.expand_dims(np.expand_dims(\n test_patch, axis=0), axis=0).astype(np.float32)\n test_patch = torch.from_numpy(test_patch).cuda()\n\n with torch.no_grad():\n y1_tanh, y1= net(test_patch)\n # ensemble\n y = torch.sigmoid(y1)\n dis_to_mask = torch.sigmoid(-1500*y1_tanh)\n\n y = 
y.cpu().data.numpy()\n dis2mask = dis_to_mask.cpu().data.numpy()\n y = y[0, :, :, :, :]\n dis2mask = dis2mask[0, :, :, :, :]\n score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \\\n = score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + y\n cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \\\n = cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + 1\n score_map = score_map/np.expand_dims(cnt, axis=0)\n label_map = (score_map[0] > 0.5).astype(np.int)\n\n if add_pad:\n label_map = label_map[wl_pad:wl_pad+w,\n hl_pad:hl_pad+h, dl_pad:dl_pad+d]\n score_map = score_map[:, wl_pad:wl_pad +\n w, hl_pad:hl_pad+h, dl_pad:dl_pad+d]\n return label_map, score_map\n\n\ndef cal_dice(prediction, label, num=2):\n total_dice = np.zeros(num-1)\n for i in range(1, num):\n prediction_tmp = (prediction == i)\n label_tmp = (label == i)\n prediction_tmp = prediction_tmp.astype(np.float)\n label_tmp = label_tmp.astype(np.float)\n\n dice = 2 * np.sum(prediction_tmp * label_tmp) / \\\n (np.sum(prediction_tmp) + np.sum(label_tmp))\n total_dice[i - 1] += dice\n\n return total_dice\n\n\ndef calculate_metric_percase(pred, gt):\n dice = metric.binary.dc(pred, gt)\n jc = metric.binary.jc(pred, gt)\n hd = metric.binary.hd95(pred, gt)\n asd = metric.binary.asd(pred, gt)\n\n return dice, jc, hd, asd\n"
] | [
[
"torch.sigmoid",
"numpy.expand_dims",
"numpy.pad",
"numpy.asarray",
"numpy.eye",
"torch.from_numpy",
"torch.no_grad",
"numpy.bincount",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zs7779/Pytorch_Retinaface | [
"eeb92c28f3217da7439118ed89df8a83c75cc161"
] | [
"retina_models/retinaface.py"
] | [
"import torch\nimport torch.nn as nn\nimport torchvision.models.detection.backbone_utils as backbone_utils\nimport torchvision.models._utils as _utils\nimport torch.nn.functional as F\nfrom collections import OrderedDict\n\nfrom retina_models.net import MobileNetV1 as MobileNetV1\nfrom retina_models.net import FPN as FPN\nfrom retina_models.net import SSH as SSH\n\n\n\nclass ClassHead(nn.Module):\n def __init__(self,inchannels=512,num_anchors=3):\n super(ClassHead,self).__init__()\n self.num_anchors = num_anchors\n self.conv1x1 = nn.Conv2d(inchannels,self.num_anchors*2,kernel_size=(1,1),stride=1,padding=0)\n\n def forward(self,x):\n out = self.conv1x1(x)\n out = out.permute(0,2,3,1).contiguous()\n \n return out.view(out.shape[0], -1, 2)\n\nclass BboxHead(nn.Module):\n def __init__(self,inchannels=512,num_anchors=3):\n super(BboxHead,self).__init__()\n self.conv1x1 = nn.Conv2d(inchannels,num_anchors*4,kernel_size=(1,1),stride=1,padding=0)\n\n def forward(self,x):\n out = self.conv1x1(x)\n out = out.permute(0,2,3,1).contiguous()\n\n return out.view(out.shape[0], -1, 4)\n\nclass LandmarkHead(nn.Module):\n def __init__(self,inchannels=512,num_anchors=3):\n super(LandmarkHead,self).__init__()\n self.conv1x1 = nn.Conv2d(inchannels,num_anchors*10,kernel_size=(1,1),stride=1,padding=0)\n\n def forward(self,x):\n out = self.conv1x1(x)\n out = out.permute(0,2,3,1).contiguous()\n\n return out.view(out.shape[0], -1, 10)\n\nclass RetinaFace(nn.Module):\n def __init__(self, cfg = None, phase = 'train'):\n \"\"\"\n :param cfg: Network related settings.\n :param phase: train or test.\n \"\"\"\n super(RetinaFace,self).__init__()\n self.phase = phase\n backbone = None\n if cfg['name'] == 'mobilenet0.25':\n backbone = MobileNetV1()\n if cfg['pretrain']:\n checkpoint = torch.load(\"./Pytorch_Retinaface/weights/mobilenetV1X0.25_pretrain.tar\", map_location=torch.device('cpu'))\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in checkpoint['state_dict'].items():\n name = k[7:] # remove module.\n new_state_dict[name] = v\n # load params\n backbone.load_state_dict(new_state_dict)\n elif cfg['name'] == 'Resnet50':\n import torchvision.models as models\n backbone = models.resnet50(pretrained=cfg['pretrain'])\n\n self.body = _utils.IntermediateLayerGetter(backbone, cfg['return_layers'])\n in_channels_stage2 = cfg['in_channel']\n in_channels_list = [\n in_channels_stage2 * 2,\n in_channels_stage2 * 4,\n in_channels_stage2 * 8,\n ]\n out_channels = cfg['out_channel']\n self.fpn = FPN(in_channels_list,out_channels)\n self.ssh1 = SSH(out_channels, out_channels)\n self.ssh2 = SSH(out_channels, out_channels)\n self.ssh3 = SSH(out_channels, out_channels)\n\n self.ClassHead = self._make_class_head(fpn_num=3, inchannels=cfg['out_channel'])\n self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])\n self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])\n\n def _make_class_head(self,fpn_num=3,inchannels=64,anchor_num=2):\n classhead = nn.ModuleList()\n for i in range(fpn_num):\n classhead.append(ClassHead(inchannels,anchor_num))\n return classhead\n \n def _make_bbox_head(self,fpn_num=3,inchannels=64,anchor_num=2):\n bboxhead = nn.ModuleList()\n for i in range(fpn_num):\n bboxhead.append(BboxHead(inchannels,anchor_num))\n return bboxhead\n\n def _make_landmark_head(self,fpn_num=3,inchannels=64,anchor_num=2):\n landmarkhead = nn.ModuleList()\n for i in range(fpn_num):\n landmarkhead.append(LandmarkHead(inchannels,anchor_num))\n return 
landmarkhead\n\n def forward(self,inputs):\n out = self.body(inputs)\n\n # FPN\n fpn = self.fpn(out)\n\n # SSH\n feature1 = self.ssh1(fpn[0])\n feature2 = self.ssh2(fpn[1])\n feature3 = self.ssh3(fpn[2])\n features = [feature1, feature2, feature3]\n\n bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)\n classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)],dim=1)\n ldm_regressions = torch.cat([self.LandmarkHead[i](feature) for i, feature in enumerate(features)], dim=1)\n\n if self.phase == 'train':\n output = (bbox_regressions, classifications, ldm_regressions)\n else:\n output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)\n return output"
] | [
[
"torch.device",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.functional.softmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ldelebec/asteroid | [
"d6390baca5409634f112ceed554ea66c4054cb54",
"d6390baca5409634f112ceed554ea66c4054cb54",
"d6390baca5409634f112ceed554ea66c4054cb54",
"d6390baca5409634f112ceed554ea66c4054cb54",
"d6390baca5409634f112ceed554ea66c4054cb54",
"d6390baca5409634f112ceed554ea66c4054cb54",
"d6390baca5409634f112ceed554ea66c4054cb54",
"d6390baca5409634f112ceed554ea66c4054cb54",
"d6390baca5409634f112ceed554ea66c4054cb54"
] | [
"asteroid/models/demask.py",
"tests/engine/scheduler_test.py",
"asteroid/losses/pit_wrapper.py",
"asteroid/data/utils.py",
"egs/wham/MixIT/eval.py",
"egs/dns_challenge/baseline/denoise.py",
"tests/losses/mixit_wrapper_test.py",
"asteroid/utils/torch_utils.py",
"egs/avspeech/looking-to-listen/local/loader/download.py"
] | [
"from torch import nn\nfrom .base_models import BaseEncoderMaskerDecoder\nfrom asteroid_filterbanks import make_enc_dec\nfrom asteroid_filterbanks.transforms import mag, magreim\nfrom ..masknn import norms, activations\nfrom ..utils.torch_utils import pad_x_to_y\nimport warnings\n\n\nclass DeMask(BaseEncoderMaskerDecoder):\n \"\"\"\n Simple MLP model for surgical mask speech enhancement A transformed-domain masking approach is used.\n\n Args:\n input_type (str, optional): whether the magnitude spectrogram \"mag\" or both real imaginary parts \"reim\" are\n passed as features to the masker network.\n Concatenation of \"mag\" and \"reim\" also can be used by using \"cat\".\n output_type (str, optional): whether the masker ouputs a mask\n for magnitude spectrogram \"mag\" or both real imaginary parts \"reim\".\n\n hidden_dims (list, optional): list of MLP hidden layer sizes.\n dropout (float, optional): dropout probability.\n activation (str, optional): type of activation used in hidden MLP layers.\n mask_act (str, optional): Which non-linear function to generate mask.\n norm_type (str, optional): To choose from ``'BN'``, ``'gLN'``,\n ``'cLN'``.\n\n fb_name (str): type of analysis and synthesis filterbanks used,\n choose between [\"stft\", \"free\", \"analytic_free\"].\n n_filters (int): number of filters in the analysis and synthesis filterbanks.\n stride (int): filterbank filters stride.\n kernel_size (int): length of filters in the filterbank.\n encoder_activation (str)\n sample_rate (float): Sampling rate of the model.\n **fb_kwargs (dict): Additional kwards to pass to the filterbank\n creation.\n \"\"\"\n\n def __init__(\n self,\n input_type=\"mag\",\n output_type=\"mag\",\n hidden_dims=(1024,),\n dropout=0.0,\n activation=\"relu\",\n mask_act=\"relu\",\n norm_type=\"gLN\",\n fb_name=\"stft\",\n n_filters=512,\n stride=256,\n kernel_size=512,\n sample_rate=16000,\n **fb_kwargs,\n ):\n encoder, decoder = make_enc_dec(\n fb_name,\n kernel_size=kernel_size,\n n_filters=n_filters,\n stride=stride,\n sample_rate=sample_rate,\n **fb_kwargs,\n )\n\n n_masker_in = self._get_n_feats_input(input_type, encoder.n_feats_out)\n n_masker_out = self._get_n_feats_output(output_type, encoder.n_feats_out)\n masker = build_demask_masker(\n n_masker_in,\n n_masker_out,\n norm_type=norm_type,\n activation=activation,\n hidden_dims=hidden_dims,\n dropout=dropout,\n mask_act=mask_act,\n )\n super().__init__(encoder, masker, decoder)\n\n self.input_type = input_type\n self.output_type = output_type\n self.hidden_dims = hidden_dims\n self.dropout = dropout\n self.activation = activation\n self.mask_act = mask_act\n self.norm_type = norm_type\n\n def _get_n_feats_input(self, input_type, encoder_n_out):\n if input_type == \"reim\":\n return encoder_n_out\n\n if input_type not in {\"mag\", \"cat\"}:\n raise NotImplementedError(\"Input type should be either mag, reim or cat\")\n\n n_feats_input = encoder_n_out // 2\n if input_type == \"cat\":\n n_feats_input += encoder_n_out\n return n_feats_input\n\n def _get_n_feats_output(self, output_type, encoder_n_out):\n if output_type == \"mag\":\n return encoder_n_out // 2\n if output_type == \"reim\":\n return encoder_n_out\n raise NotImplementedError(\"Output type should be either mag or reim\")\n\n def forward_masker(self, tf_rep):\n \"\"\"Estimates masks based on time-frequency representations.\n\n Args:\n tf_rep (torch.Tensor): Time-frequency representation in\n (batch, freq, seq).\n\n Returns:\n torch.Tensor: Estimated masks in (batch, freq, seq).\n \"\"\"\n 
masker_input = tf_rep\n if self.input_type == \"mag\":\n masker_input = mag(masker_input)\n elif self.input_type == \"cat\":\n masker_input = magreim(masker_input)\n est_masks = self.masker(masker_input)\n if self.output_type == \"mag\":\n est_masks = est_masks.repeat(1, 2, 1)\n return est_masks\n\n def apply_masks(self, tf_rep, est_masks):\n \"\"\"Applies masks to time-frequency representations.\n\n Args:\n tf_rep (torch.Tensor): Time-frequency representations in\n (batch, freq, seq).\n est_masks (torch.Tensor): Estimated masks in (batch, freq, seq).\n\n Returns:\n torch.Tensor: Masked time-frequency representations.\n \"\"\"\n if self.output_type == \"reim\":\n tf_rep = tf_rep.unsqueeze(1)\n return est_masks * tf_rep\n\n def get_model_args(self):\n \"\"\" Arguments needed to re-instantiate the model. \"\"\"\n model_args = {\n \"input_type\": self.input_type,\n \"output_type\": self.output_type,\n \"hidden_dims\": self.hidden_dims,\n \"dropout\": self.dropout,\n \"activation\": self.activation,\n \"mask_act\": self.mask_act,\n \"norm_type\": self.norm_type,\n }\n model_args.update(self.encoder.filterbank.get_config())\n return model_args\n\n\ndef build_demask_masker(\n n_in,\n n_out,\n activation=\"relu\",\n dropout=0.0,\n hidden_dims=(1024,),\n mask_act=\"relu\",\n norm_type=\"gLN\",\n):\n make_layer_norm = norms.get(norm_type)\n net = [make_layer_norm(n_in)]\n layer_activation = activations.get(activation)()\n in_chan = n_in\n for hidden_dim in hidden_dims:\n net.extend(\n [\n nn.Conv1d(in_chan, hidden_dim, 1),\n make_layer_norm(hidden_dim),\n layer_activation,\n nn.Dropout(dropout),\n ]\n )\n in_chan = hidden_dim\n\n net.extend([nn.Conv1d(in_chan, n_out, 1), activations.get(mask_act)()])\n return nn.Sequential(*net)\n",
"from torch import nn, optim\nfrom torch.utils import data\nfrom pytorch_lightning import Trainer\n\n\nfrom asteroid.engine.system import System\nfrom asteroid.utils.test_utils import DummyDataset\nfrom asteroid.engine.schedulers import NoamScheduler, DPTNetScheduler\n\n\ndef common_setup():\n model = nn.Sequential(nn.Linear(10, 10), nn.ReLU())\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\n dataset = DummyDataset()\n loader = data.DataLoader(dataset, batch_size=2, num_workers=4)\n trainer = Trainer(max_epochs=1, fast_dev_run=True)\n return model, optimizer, loader, trainer\n\n\ndef test_state_dict():\n \"\"\" Load and serialize scheduler. \"\"\"\n model, optimizer, loader, trainer = common_setup()\n sched = NoamScheduler(optimizer, d_model=10, warmup_steps=100)\n state_dict = sched.state_dict()\n sched.load_state_dict(state_dict)\n state_dict_c = sched.state_dict()\n assert state_dict == state_dict_c\n # Test zero_grad\n sched.zero_grad()\n\n\ndef test_noam_scheduler():\n model, optimizer, loader, trainer = common_setup()\n scheduler = {\n \"scheduler\": NoamScheduler(optimizer, d_model=10, warmup_steps=100),\n \"interval\": \"step\",\n }\n\n system = System(\n model,\n optimizer,\n loss_func=nn.MSELoss(),\n train_loader=loader,\n val_loader=loader,\n scheduler=scheduler,\n )\n trainer.fit(system)\n # Test `as_tensor` for `plot`\n scheduler[\"scheduler\"].as_tensor()\n\n\ndef test_dptnet_scheduler():\n model, optimizer, loader, trainer = common_setup()\n\n scheduler = {\n \"scheduler\": DPTNetScheduler(optimizer, d_model=10, steps_per_epoch=6, warmup_steps=4),\n \"interval\": \"step\",\n }\n\n system = System(\n model,\n optimizer,\n loss_func=nn.MSELoss(),\n train_loader=loader,\n val_loader=loader,\n scheduler=scheduler,\n )\n trainer.fit(system)\n # Test `as_tensor` for `plot`\n scheduler[\"scheduler\"].as_tensor()\n",
"from itertools import permutations\nimport torch\nfrom torch import nn\nfrom scipy.optimize import linear_sum_assignment\n\n\nclass PITLossWrapper(nn.Module):\n r\"\"\"Permutation invariant loss wrapper.\n\n Args:\n loss_func: function with signature (est_targets, targets, **kwargs).\n pit_from (str): Determines how PIT is applied.\n\n * ``'pw_mtx'`` (pairwise matrix): `loss_func` computes pairwise\n losses and returns a torch.Tensor of shape\n :math:`(batch, n\\_src, n\\_src)`. Each element\n :math:`(batch, i, j)` corresponds to the loss between\n :math:`targets[:, i]` and :math:`est\\_targets[:, j]`\n * ``'pw_pt'`` (pairwise point): `loss_func` computes the loss for\n a batch of single source and single estimates (tensors won't\n have the source axis). Output shape : :math:`(batch)`.\n See :meth:`~PITLossWrapper.get_pw_losses`.\n * ``'perm_avg'`` (permutation average): `loss_func` computes the\n average loss for a given permutations of the sources and\n estimates. Output shape : :math:`(batch)`.\n See :meth:`~PITLossWrapper.best_perm_from_perm_avg_loss`.\n\n In terms of efficiency, ``'perm_avg'`` is the least efficicient.\n\n perm_reduce (Callable): torch function to reduce permutation losses.\n Defaults to None (equivalent to mean). Signature of the func\n (pwl_set, **kwargs) : :math:`(B, n\\_src!, n\\_src) --> (B, n\\_src!)`.\n `perm_reduce` can receive **kwargs during forward using the\n `reduce_kwargs` argument (dict). If those argument are static,\n consider defining a small function or using `functools.partial`.\n Only used in `'pw_mtx'` and `'pw_pt'` `pit_from` modes.\n\n For each of these modes, the best permutation and reordering will be\n automatically computed. When either ``'pw_mtx'`` or ``'pw_pt'`` is used,\n and the number of sources is larger than three, the hungarian algorithm is\n used to find the best permutation.\n\n Examples\n >>> import torch\n >>> from asteroid.losses import pairwise_neg_sisdr\n >>> sources = torch.randn(10, 3, 16000)\n >>> est_sources = torch.randn(10, 3, 16000)\n >>> # Compute PIT loss based on pairwise losses\n >>> loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')\n >>> loss_val = loss_func(est_sources, sources)\n >>>\n >>> # Using reduce\n >>> def reduce(perm_loss, src):\n >>> weighted = perm_loss * src.norm(dim=-1, keepdim=True)\n >>> return torch.mean(weighted, dim=-1)\n >>>\n >>> loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx',\n >>> perm_reduce=reduce)\n >>> reduce_kwargs = {'src': sources}\n >>> loss_val = loss_func(est_sources, sources,\n >>> reduce_kwargs=reduce_kwargs)\n \"\"\"\n\n def __init__(self, loss_func, pit_from=\"pw_mtx\", perm_reduce=None):\n super().__init__()\n self.loss_func = loss_func\n self.pit_from = pit_from\n self.perm_reduce = perm_reduce\n if self.pit_from not in [\"pw_mtx\", \"pw_pt\", \"perm_avg\"]:\n raise ValueError(\n \"Unsupported loss function type for now. Expected\"\n \"one of [`pw_mtx`, `pw_pt`, `perm_avg`]\"\n )\n\n def forward(self, est_targets, targets, return_est=False, reduce_kwargs=None, **kwargs):\n r\"\"\"Find the best permutation and return the loss.\n\n Args:\n est_targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.\n The batch of target estimates.\n targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.\n The batch of training targets\n return_est: Boolean. 
Whether to return the reordered targets\n estimates (To compute metrics or to save example).\n reduce_kwargs (dict or None): kwargs that will be passed to the\n pairwise losses reduce function (`perm_reduce`).\n **kwargs: additional keyword argument that will be passed to the\n loss function.\n\n Returns:\n - Best permutation loss for each batch sample, average over\n the batch.\n - The reordered targets estimates if ``return_est`` is True.\n :class:`torch.Tensor` of shape $(batch, nsrc, ...)$.\n \"\"\"\n n_src = targets.shape[1]\n assert n_src < 10, f\"Expected source axis along dim 1, found {n_src}\"\n if self.pit_from == \"pw_mtx\":\n # Loss function already returns pairwise losses\n pw_losses = self.loss_func(est_targets, targets, **kwargs)\n elif self.pit_from == \"pw_pt\":\n # Compute pairwise losses with a for loop.\n pw_losses = self.get_pw_losses(self.loss_func, est_targets, targets, **kwargs)\n elif self.pit_from == \"perm_avg\":\n # Cannot get pairwise losses from this type of loss.\n # Find best permutation directly.\n min_loss, batch_indices = self.best_perm_from_perm_avg_loss(\n self.loss_func, est_targets, targets, **kwargs\n )\n # Take the mean over the batch\n mean_loss = torch.mean(min_loss)\n if not return_est:\n return mean_loss\n reordered = self.reorder_source(est_targets, batch_indices)\n return mean_loss, reordered\n else:\n return\n\n assert pw_losses.ndim == 3, (\n \"Something went wrong with the loss \" \"function, please read the docs.\"\n )\n assert pw_losses.shape[0] == targets.shape[0], \"PIT loss needs same batch dim as input\"\n\n reduce_kwargs = reduce_kwargs if reduce_kwargs is not None else dict()\n min_loss, batch_indices = self.find_best_perm(\n pw_losses, perm_reduce=self.perm_reduce, **reduce_kwargs\n )\n mean_loss = torch.mean(min_loss)\n if not return_est:\n return mean_loss\n reordered = self.reorder_source(est_targets, batch_indices)\n return mean_loss, reordered\n\n @staticmethod\n def get_pw_losses(loss_func, est_targets, targets, **kwargs):\n r\"\"\"Get pair-wise losses between the training targets and its estimate\n for a given loss function.\n\n Args:\n loss_func: function with signature (est_targets, targets, **kwargs)\n The loss function to get pair-wise losses from.\n est_targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.\n The batch of target estimates.\n targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.\n The batch of training targets.\n **kwargs: additional keyword argument that will be passed to the\n loss function.\n\n Returns:\n torch.Tensor or size $(batch, nsrc, nsrc)$, losses computed for\n all permutations of the targets and est_targets.\n\n This function can be called on a loss function which returns a tensor\n of size :math:`(batch)`. There are more efficient ways to compute pair-wise\n losses using broadcasting.\n \"\"\"\n batch_size, n_src, *_ = targets.shape\n pair_wise_losses = targets.new_empty(batch_size, n_src, n_src)\n for est_idx, est_src in enumerate(est_targets.transpose(0, 1)):\n for target_idx, target_src in enumerate(targets.transpose(0, 1)):\n pair_wise_losses[:, est_idx, target_idx] = loss_func(est_src, target_src, **kwargs)\n return pair_wise_losses\n\n @staticmethod\n def best_perm_from_perm_avg_loss(loss_func, est_targets, targets, **kwargs):\n r\"\"\"Find best permutation from loss function with source axis.\n\n Args:\n loss_func: function with signature $(est_targets, targets, **kwargs)$\n The loss function batch losses from.\n est_targets: torch.Tensor. 
Expected shape $(batch, nsrc, *)$.\n The batch of target estimates.\n targets: torch.Tensor. Expected shape $(batch, nsrc, *)$.\n The batch of training targets.\n **kwargs: additional keyword argument that will be passed to the\n loss function.\n\n Returns:\n - :class:`torch.Tensor`:\n The loss corresponding to the best permutation of size $(batch,)$.\n\n - :class:`torch.Tensor`:\n The indices of the best permutations.\n \"\"\"\n n_src = targets.shape[1]\n perms = torch.tensor(list(permutations(range(n_src))), dtype=torch.long)\n loss_set = torch.stack(\n [loss_func(est_targets[:, perm], targets, **kwargs) for perm in perms], dim=1\n )\n # Indexes and values of min losses for each batch element\n min_loss, min_loss_idx = torch.min(loss_set, dim=1)\n # Permutation indices for each batch.\n batch_indices = torch.stack([perms[m] for m in min_loss_idx], dim=0)\n return min_loss, batch_indices\n\n @staticmethod\n def find_best_perm(pair_wise_losses, perm_reduce=None, **kwargs):\n r\"\"\"Find the best permutation, given the pair-wise losses.\n\n Dispatch between factorial method if number of sources is small (<3)\n and hungarian method for more sources. If ``perm_reduce`` is not None,\n the factorial method is always used.\n\n Args:\n pair_wise_losses (:class:`torch.Tensor`):\n Tensor of shape :math:`(batch, n\\_src, n\\_src)`. Pairwise losses.\n perm_reduce (Callable): torch function to reduce permutation losses.\n Defaults to None (equivalent to mean). Signature of the func\n (pwl_set, **kwargs) : :math:`(B, n\\_src!, n\\_src) -> (B, n\\_src!)`\n **kwargs: additional keyword argument that will be passed to the\n permutation reduce function.\n\n Returns:\n - :class:`torch.Tensor`:\n The loss corresponding to the best permutation of size $(batch,)$.\n\n - :class:`torch.Tensor`:\n The indices of the best permutations.\n \"\"\"\n n_src = pair_wise_losses.shape[-1]\n if perm_reduce is not None or n_src <= 3:\n min_loss, batch_indices = PITLossWrapper.find_best_perm_factorial(\n pair_wise_losses, perm_reduce=perm_reduce, **kwargs\n )\n else:\n min_loss, batch_indices = PITLossWrapper.find_best_perm_hungarian(pair_wise_losses)\n return min_loss, batch_indices\n\n @staticmethod\n def reorder_source(source, batch_indices):\n r\"\"\"Reorder sources according to the best permutation.\n\n Args:\n source (torch.Tensor): Tensor of shape :math:`(batch, n_src, time)`\n batch_indices (torch.Tensor): Tensor of shape :math:`(batch, n_src)`.\n Contains optimal permutation indices for each batch.\n\n Returns:\n :class:`torch.Tensor`: Reordered sources.\n \"\"\"\n reordered_sources = torch.stack(\n [torch.index_select(s, 0, b) for s, b in zip(source, batch_indices)]\n )\n return reordered_sources\n\n @staticmethod\n def find_best_perm_factorial(pair_wise_losses, perm_reduce=None, **kwargs):\n r\"\"\"Find the best permutation given the pair-wise losses by looping\n through all the permutations.\n\n Args:\n pair_wise_losses (:class:`torch.Tensor`):\n Tensor of shape :math:`(batch, n_src, n_src)`. Pairwise losses.\n perm_reduce (Callable): torch function to reduce permutation losses.\n Defaults to None (equivalent to mean). 
Signature of the func\n (pwl_set, **kwargs) : :math:`(B, n\\_src!, n\\_src) -> (B, n\\_src!)`\n **kwargs: additional keyword argument that will be passed to the\n permutation reduce function.\n\n Returns:\n - :class:`torch.Tensor`:\n The loss corresponding to the best permutation of size $(batch,)$.\n\n - :class:`torch.Tensor`:\n The indices of the best permutations.\n\n MIT Copyright (c) 2018 Kaituo XU.\n See `Original code\n <https://github.com/kaituoxu/Conv-TasNet/blob/master>`__ and `License\n <https://github.com/kaituoxu/Conv-TasNet/blob/master/LICENSE>`__.\n \"\"\"\n n_src = pair_wise_losses.shape[-1]\n # After transposition, dim 1 corresp. to sources and dim 2 to estimates\n pwl = pair_wise_losses.transpose(-1, -2)\n perms = pwl.new_tensor(list(permutations(range(n_src))), dtype=torch.long)\n # Column permutation indices\n idx = torch.unsqueeze(perms, 2)\n # Loss mean of each permutation\n if perm_reduce is None:\n # one-hot, [n_src!, n_src, n_src]\n perms_one_hot = pwl.new_zeros((*perms.size(), n_src)).scatter_(2, idx, 1)\n loss_set = torch.einsum(\"bij,pij->bp\", [pwl, perms_one_hot])\n loss_set /= n_src\n else:\n # batch = pwl.shape[0]; n_perm = idx.shape[0]\n # [batch, n_src!, n_src] : Pairwise losses for each permutation.\n pwl_set = pwl[:, torch.arange(n_src), idx.squeeze(-1)]\n # Apply reduce [batch, n_src!, n_src] --> [batch, n_src!]\n loss_set = perm_reduce(pwl_set, **kwargs)\n # Indexes and values of min losses for each batch element\n min_loss, min_loss_idx = torch.min(loss_set, dim=1)\n\n # Permutation indices for each batch.\n batch_indices = torch.stack([perms[m] for m in min_loss_idx], dim=0)\n return min_loss, batch_indices\n\n @staticmethod\n def find_best_perm_hungarian(pair_wise_losses: torch.Tensor):\n \"\"\"\n Find the best permutation given the pair-wise losses, using the Hungarian algorithm.\n\n Returns:\n - :class:`torch.Tensor`:\n The loss corresponding to the best permutation of size (batch,).\n\n - :class:`torch.Tensor`:\n The indices of the best permutations.\n \"\"\"\n # After transposition, dim 1 corresp. to sources and dim 2 to estimates\n pwl = pair_wise_losses.transpose(-1, -2)\n # Just bring the numbers to cpu(), not the graph\n pwl_copy = pwl.detach().cpu()\n # Loop over batch + row indices are always ordered for square matrices.\n batch_indices = torch.tensor([linear_sum_assignment(pwl)[1] for pwl in pwl_copy]).to(\n pwl.device\n )\n min_loss = torch.gather(pwl, 2, batch_indices[..., None]).mean([-1, -2])\n return min_loss, batch_indices\n\n\nclass PITReorder(PITLossWrapper):\n \"\"\"Permutation invariant reorderer. Only returns the reordered estimates.\n See `:py:class:asteroid.losses.PITLossWrapper`.\"\"\"\n\n def forward(self, est_targets, targets, reduce_kwargs=None, **kwargs):\n _, reordered = super().forward(\n est_targets=est_targets,\n targets=targets,\n return_est=True,\n reduce_kwargs=reduce_kwargs,\n **kwargs,\n )\n return reordered\n",
"import torch\nfrom torch.utils.data._utils.collate import default_collate\n\n\ndef online_mixing_collate(batch):\n \"\"\"Mix target sources to create new mixtures.\n Output of the default collate function is expected to return two objects:\n inputs and targets.\n \"\"\"\n # Inputs (batch, time) / targets (batch, n_src, time)\n inputs, targets = default_collate(batch)\n batch, n_src, _ = targets.shape\n\n energies = torch.sum(targets ** 2, dim=-1, keepdim=True)\n new_src = []\n for i in range(targets.shape[1]):\n new_s = targets[torch.randperm(batch), i, :]\n new_s = new_s * torch.sqrt(energies[:, i] / (new_s ** 2).sum(-1, keepdims=True))\n new_src.append(new_s)\n\n targets = torch.stack(new_src, dim=1)\n inputs = targets.sum(1)\n return inputs, targets\n",
"import os\nimport random\nimport soundfile as sf\nimport torch\nimport yaml\nimport json\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom pprint import pprint\n\nfrom asteroid.metrics import get_metrics\nfrom asteroid.losses import PITLossWrapper, pairwise_neg_sisdr\nfrom asteroid.data.wham_dataset import WhamDataset\nfrom asteroid.models import DPRNNTasNet\nfrom asteroid.utils import tensors_to_device\nfrom asteroid.models import save_publishable\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--task\",\n type=str,\n required=True,\n help=\"One of `enh_single`, `enh_both`, \" \"`sep_clean` or `sep_noisy`\",\n)\nparser.add_argument(\n \"--test_dir\", type=str, required=True, help=\"Test directory including the json files\"\n)\nparser.add_argument(\n \"--use_gpu\", type=int, default=0, help=\"Whether to use the GPU for model execution\"\n)\nparser.add_argument(\"--exp_dir\", default=\"exp/tmp\", help=\"Experiment root\")\nparser.add_argument(\n \"--n_save_ex\", type=int, default=50, help=\"Number of audio examples to save, -1 means all\"\n)\n\ncompute_metrics = [\"si_sdr\", \"sdr\", \"sir\", \"sar\", \"stoi\"]\n\n\ndef main(conf):\n model_path = os.path.join(conf[\"exp_dir\"], \"best_model.pth\")\n model = DPRNNTasNet.from_pretrained(model_path)\n # Handle device placement\n if conf[\"use_gpu\"]:\n model.cuda()\n model_device = next(model.parameters()).device\n test_set = WhamDataset(\n conf[\"test_dir\"],\n conf[\"task\"],\n sample_rate=conf[\"sample_rate\"],\n nondefault_nsrc=None,\n segment=None,\n ) # Uses all segment length\n # Used to reorder sources only\n loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from=\"pw_mtx\")\n\n # Randomly choose the indexes of sentences to save.\n ex_save_dir = os.path.join(conf[\"exp_dir\"], \"examples/\")\n if conf[\"n_save_ex\"] == -1:\n conf[\"n_save_ex\"] = len(test_set)\n save_idx = random.sample(range(len(test_set)), conf[\"n_save_ex\"])\n series_list = []\n torch.no_grad().__enter__()\n for idx in tqdm(range(len(test_set))):\n # Forward the network on the mixture.\n mix, sources = tensors_to_device(test_set[idx], device=model_device)\n est_sources = model(mix[None, None])\n _, indxs = torch.sort(torch.sqrt(torch.mean(est_sources ** 2, dim=-1)), descending=True)\n indxs = indxs[:, :2]\n # we know a-priori that there are 2 sources in WHAM-clean (WSJ0-2mix clean)\n # so we sort the estimated signals and take only the two with highest energy.\n est_sources = est_sources.gather(1, indxs.unsqueeze(-1).repeat(1, 1, est_sources.shape[-1]))\n loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)\n mix_np = mix[None].cpu().data.numpy()\n sources_np = sources.cpu().data.numpy()\n est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()\n utt_metrics = get_metrics(\n mix_np,\n sources_np,\n est_sources_np,\n sample_rate=conf[\"sample_rate\"],\n metrics_list=compute_metrics,\n )\n utt_metrics[\"mix_path\"] = test_set.mix[idx][0]\n series_list.append(pd.Series(utt_metrics))\n\n # Save some examples in a folder. 
Wav files and metrics as text.\n if idx in save_idx:\n local_save_dir = os.path.join(ex_save_dir, \"ex_{}/\".format(idx))\n os.makedirs(local_save_dir, exist_ok=True)\n sf.write(local_save_dir + \"mixture.wav\", mix_np[0], conf[\"sample_rate\"])\n # Loop over the sources and estimates\n for src_idx, src in enumerate(sources_np):\n sf.write(local_save_dir + \"s{}.wav\".format(src_idx + 1), src, conf[\"sample_rate\"])\n for src_idx, est_src in enumerate(est_sources_np):\n est_src *= np.max(np.abs(mix_np)) / np.max(np.abs(est_src))\n sf.write(\n local_save_dir + \"s{}_estimate.wav\".format(src_idx + 1),\n est_src,\n conf[\"sample_rate\"],\n )\n # Write local metrics to the example folder.\n with open(local_save_dir + \"metrics.json\", \"w\") as f:\n json.dump(utt_metrics, f, indent=0)\n\n # Save all metrics to the experiment folder.\n all_metrics_df = pd.DataFrame(series_list)\n all_metrics_df.to_csv(os.path.join(conf[\"exp_dir\"], \"all_metrics.csv\"))\n\n # Print and save summary metrics\n final_results = {}\n for metric_name in compute_metrics:\n input_metric_name = \"input_\" + metric_name\n ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]\n final_results[metric_name] = all_metrics_df[metric_name].mean()\n final_results[metric_name + \"_imp\"] = ldf.mean()\n print(\"Overall metrics :\")\n pprint(final_results)\n with open(os.path.join(conf[\"exp_dir\"], \"final_metrics.json\"), \"w\") as f:\n json.dump(final_results, f, indent=0)\n\n model_dict = torch.load(model_path, map_location=\"cpu\")\n os.makedirs(os.path.join(conf[\"exp_dir\"], \"publish_dir\"), exist_ok=True)\n publishable = save_publishable(\n os.path.join(conf[\"exp_dir\"], \"publish_dir\"),\n model_dict,\n metrics=final_results,\n train_conf=train_conf,\n )\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n arg_dic = dict(vars(args))\n\n # Load training config\n conf_path = os.path.join(args.exp_dir, \"conf.yml\")\n with open(conf_path) as f:\n train_conf = yaml.safe_load(f)\n arg_dic[\"sample_rate\"] = train_conf[\"data\"][\"sample_rate\"]\n arg_dic[\"train_conf\"] = train_conf\n\n main(arg_dic)\n",
"import glob\nimport os\n\nimport soundfile as sf\nimport torch\nimport yaml\nimport json\nimport argparse\nfrom tqdm import tqdm\n\nfrom model import load_best_model\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--denoise_path\", type=str, required=True, help=\"Directory containing wav files, or file path\"\n)\nparser.add_argument(\n \"--use_gpu\", type=int, default=0, help=\"Whether to use the GPU for model execution\"\n)\nparser.add_argument(\"--exp_dir\", default=\"exp/tmp\", help=\"Experiment root\")\n\n\ndef main(conf):\n # Get best trained model\n model = load_best_model(conf[\"train_conf\"], conf[\"exp_dir\"])\n if conf[\"use_gpu\"]:\n model = model.cuda()\n model_device = next(model.parameters()).device\n # Get a list of wav files (or single wav file)\n save_folder = os.path.join(conf[\"exp_dir\"], \"denoise\")\n os.makedirs(save_folder, exist_ok=True)\n if os.path.isfile(conf[\"denoise_path\"]):\n all_wavs = [conf[\"denoise_path\"]]\n else:\n # If this is a bunch of files we need to denoise, call the subdir\n # of denoise the same way as the basename of the denoise dir.\n save_folder = os.path.join(save_folder, os.path.basename(conf[\"denoise_path\"]))\n all_wavs = glob.glob(conf[\"denoise_path\"] + \"*.wav\")\n\n for wav_path in tqdm(all_wavs):\n mix, fs = sf.read(wav_path, dtype=\"float32\")\n with torch.no_grad():\n net_inp = torch.tensor(mix)[None].to(model_device)\n estimate = model.denoise(net_inp).squeeze().cpu().data.numpy()\n # Save the estimate speech\n wav_name = os.path.basename(wav_path)\n sf.write(os.path.join(save_folder, wav_name), estimate, fs)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n arg_dic = dict(vars(args))\n\n # Load training config\n conf_path = os.path.join(args.exp_dir, \"conf.yml\")\n with open(conf_path) as conf_file:\n train_conf = yaml.safe_load(conf_file)\n arg_dic[\"train_conf\"] = train_conf\n\n main(arg_dic)\n",
"import pytest\nimport torch\n\nfrom asteroid.losses import MixITLossWrapper\n\n\ndef good_batch_loss_func(y_pred, y_true):\n batch, *_ = y_true.shape\n return torch.randn(batch)\n\n\[email protected](\"batch_size\", [1, 2, 8])\[email protected](\"n_src\", [2, 3, 4])\[email protected](\"time\", [16000])\ndef test_mixitwrapper_as_pit_wrapper(batch_size, n_src, time):\n targets = torch.randn(batch_size, n_src, time)\n est_targets = torch.randn(batch_size, n_src, time)\n\n # mix_it base case: targets == mixtures / With and without return estimates\n loss = MixITLossWrapper(good_batch_loss_func, generalized=False)\n loss(est_targets, targets)\n loss_value, reordered_est = loss(est_targets, targets, return_est=True)\n assert reordered_est.shape == est_targets.shape\n\n\[email protected](\"batch_size\", [1, 2, 4])\[email protected](\"factor\", [1, 2, 3])\[email protected](\"n_mix\", [2, 3])\[email protected](\"time\", [16000])\ndef test_mixit_wrapper(batch_size, factor, n_mix, time):\n mixtures = torch.randn(batch_size, n_mix, time)\n n_src = n_mix * factor\n est_targets = torch.randn(batch_size, n_src, time)\n\n # mix_it / With and without return estimates\n loss = MixITLossWrapper(good_batch_loss_func, generalized=False)\n loss(est_targets, mixtures)\n loss_value, reordered_mix = loss(est_targets, mixtures, return_est=True)\n assert reordered_mix.shape == mixtures.shape\n\n\[email protected](\"batch_size\", [1, 2, 8])\[email protected](\"n_src\", [2, 3, 4, 5])\[email protected](\"n_mix\", [2])\[email protected](\"time\", [16000])\ndef test_mixit_gen_wrapper(batch_size, n_src, n_mix, time):\n mixtures = torch.randn(batch_size, n_mix, time)\n est_targets = torch.randn(batch_size, n_src, time)\n\n # mix_it_gen / With and without return estimates. Works only with two mixtures\n loss = MixITLossWrapper(good_batch_loss_func)\n loss(est_targets, mixtures)\n loss_value, reordered_est = loss(est_targets, mixtures, return_est=True)\n assert reordered_est.shape == mixtures.shape\n",
"import functools\n\nimport torch\nfrom torch import nn\nfrom collections import OrderedDict\n\n\ndef to_cuda(tensors): # pragma: no cover\n \"\"\"Transfer tensor, dict or list of tensors to GPU.\n\n Args:\n tensors (:class:`torch.Tensor`, list or dict): May be a single, a\n list or a dictionary of tensors.\n\n Returns:\n :class:`torch.Tensor`:\n Same as input but transferred to cuda. Goes through lists and dicts\n and transfers the torch.Tensor to cuda. Leaves the rest untouched.\n \"\"\"\n if isinstance(tensors, torch.Tensor):\n return tensors.cuda()\n if isinstance(tensors, list):\n return [to_cuda(tens) for tens in tensors]\n if isinstance(tensors, dict):\n for key in tensors.keys():\n tensors[key] = to_cuda(tensors[key])\n return tensors\n raise TypeError(\n \"tensors must be a tensor or a list or dict of tensors. \"\n \" Got tensors of type {}\".format(type(tensors))\n )\n\n\ndef tensors_to_device(tensors, device):\n \"\"\"Transfer tensor, dict or list of tensors to device.\n\n Args:\n tensors (:class:`torch.Tensor`): May be a single, a list or a\n dictionary of tensors.\n device (:class: `torch.device`): the device where to place the tensors.\n\n Returns:\n Union [:class:`torch.Tensor`, list, tuple, dict]:\n Same as input but transferred to device.\n Goes through lists and dicts and transfers the torch.Tensor to\n device. Leaves the rest untouched.\n \"\"\"\n if isinstance(tensors, torch.Tensor):\n return tensors.to(device)\n elif isinstance(tensors, (list, tuple)):\n return [tensors_to_device(tens, device) for tens in tensors]\n elif isinstance(tensors, dict):\n for key in tensors.keys():\n tensors[key] = tensors_to_device(tensors[key], device)\n return tensors\n else:\n return tensors\n\n\ndef get_device(tensor_or_module, default=None):\n \"\"\"Get the device of a tensor or a module.\n\n Args:\n tensor_or_module (Union[torch.Tensor, torch.nn.Module]):\n The object to get the device from. Can be a ``torch.Tensor``,\n a ``torch.nn.Module``, or anything else that has a ``device`` attribute\n or a ``parameters() -> Iterator[torch.Tensor]`` method.\n default (Optional[Union[str, torch.device]]): If the device can not be\n determined, return this device instead. If ``None`` (the default),\n raise a ``TypeError`` instead.\n\n Returns:\n torch.device: The device that ``tensor_or_module`` is on.\n \"\"\"\n if hasattr(tensor_or_module, \"device\"):\n return tensor_or_module.device\n elif hasattr(tensor_or_module, \"parameters\"):\n return next(tensor_or_module.parameters()).device\n elif default is None:\n raise TypeError(f\"Don't know how to get device of {type(tensor_or_module)} object\")\n else:\n return torch.device(default)\n\n\ndef is_tracing():\n # Taken for pytorch for compat in 1.6.0\n \"\"\"\n Returns ``True`` in tracing (if a function is called during the tracing of\n code with ``torch.jit.trace``) and ``False`` otherwise.\n \"\"\"\n return torch._C._is_tracing()\n\n\ndef script_if_tracing(fn):\n # Taken for pytorch for compat in 1.6.0\n \"\"\"\n Compiles ``fn`` when it is first called during tracing. ``torch.jit.script``\n has a non-negligible start up time when it is first called due to\n lazy-initializations of many compiler builtins. Therefore you should not use\n it in library code. However, you may want to have parts of your library work\n in tracing even if they use control flow. 
In these cases, you should use\n ``@torch.jit.script_if_tracing`` to substitute for\n ``torch.jit.script``.\n\n Arguments:\n fn: A function to compile.\n\n Returns:\n If called during tracing, a :class:`ScriptFunction` created by `\n `torch.jit.script`` is returned. Otherwise, the original function ``fn`` is returned.\n \"\"\"\n\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n if not is_tracing():\n # Not tracing, don't do anything\n return fn(*args, **kwargs)\n\n compiled_fn = torch.jit.script(wrapper.__original_fn) # type: ignore\n return compiled_fn(*args, **kwargs)\n\n wrapper.__original_fn = fn # type: ignore\n wrapper.__script_if_tracing_wrapper = True # type: ignore\n\n return wrapper\n\n\n@script_if_tracing\ndef pad_x_to_y(x: torch.Tensor, y: torch.Tensor, axis: int = -1) -> torch.Tensor:\n \"\"\"Right-pad or right-trim first argument to have same size as second argument\n\n Args:\n x (torch.Tensor): Tensor to be padded.\n y (torch.Tensor): Tensor to pad `x` to.\n axis (int): Axis to pad on.\n\n Returns:\n torch.Tensor, `x` padded to match `y`'s shape.\n \"\"\"\n if axis != -1:\n raise NotImplementedError\n inp_len = y.shape[axis]\n output_len = x.shape[axis]\n return nn.functional.pad(x, [0, inp_len - output_len])\n\n\ndef load_state_dict_in(state_dict, model):\n \"\"\"Strictly loads state_dict in model, or the next submodel.\n Useful to load standalone model after training it with System.\n\n Args:\n state_dict (OrderedDict): the state_dict to load.\n model (torch.nn.Module): the model to load it into\n\n Returns:\n torch.nn.Module: model with loaded weights.\n\n .. note:: Keys in a state_dict look like ``object1.object2.layer_name.weight.etc``\n We first try to load the model in the classic way.\n If this fail we removes the first left part of the key to obtain\n ``object2.layer_name.weight.etc``.\n Blindly loading with ``strictly=False`` should be done with some logging\n of the missing keys in the state_dict and the model.\n\n \"\"\"\n try:\n # This can fail if the model was included into a bigger nn.Module\n # object. For example, into System.\n model.load_state_dict(state_dict, strict=True)\n except RuntimeError:\n # keys look like object1.object2.layer_name.weight.etc\n # The following will remove the first left part of the key to obtain\n # object2.layer_name.weight.etc.\n # Blindly loading with strictly=False should be done with some\n # new_state_dict of the missing keys in the state_dict and the model.\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n new_k = k[k.find(\".\") + 1 :]\n new_state_dict[new_k] = v\n model.load_state_dict(new_state_dict, strict=True)\n return model\n\n\ndef are_models_equal(model1, model2):\n \"\"\"Check for weights equality between models.\n\n Args:\n model1 (nn.Module): model instance to be compared.\n model2 (nn.Module): second model instance to be compared.\n\n Returns:\n bool: Whether all model weights are equal.\n \"\"\"\n for p1, p2 in zip(model1.parameters(), model2.parameters()):\n if p1.data.ne(p2.data).sum() > 0:\n return False\n return True\n\n\n@script_if_tracing\ndef jitable_shape(tensor):\n \"\"\"Gets shape of ``tensor`` as ``torch.Tensor`` type for jit compiler\n\n .. note::\n Returning ``tensor.shape`` of ``tensor.size()`` directly is not torchscript\n compatible as return type would not be supported.\n\n Args:\n tensor (torch.Tensor): Tensor\n\n Returns:\n torch.Tensor: Shape of ``tensor``\n \"\"\"\n return torch.tensor(tensor.shape)\n",
"import os\nimport time\nimport tqdm\nimport argparse\nimport subprocess\nimport pandas as pd\nfrom pathlib import Path\nimport concurrent.futures\n\nfrom constants import VIDEO_DIR\n\n\ndef download(link, path, final_name=None):\n command = \"youtube-dl {} --no-check-certificate --output {}.mp4 -f 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4'\"\n if os.path.exists(path) and os.path.isfile(path):\n print(\"File already downloaded\")\n return False\n if final_name is not None and os.path.isfile(final_name):\n print(\"File already cropped\")\n return True\n\n p = subprocess.Popen(\n command.format(link, path),\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n ).communicate()\n return False\n\n\ndef crop(path, start, end, downloaded_name):\n command = (\n \"ffmpeg -y -i {}.mp4 -ss {} -t {} -c:v libx264 -crf 18 -preset veryfast -pix_fmt yuv420p \"\n \"-c:a aac -b:a 128k -strict experimental -r 25 {}\"\n )\n\n start_minute, start_second = int(start // 60), int(start % 60)\n end_minute, end_second = int(end // 60) - start_minute, int(end % 60) - start_second\n\n new_filepath = downloaded_name + \"_final.mp4\"\n\n if os.path.exists(new_filepath) and os.path.isfile(new_filepath):\n return\n\n command = command.format(\n downloaded_name,\n f\"{start_minute}:{start_second}\",\n f\"{end_minute}:{end_second}\",\n new_filepath,\n )\n subprocess.Popen(\n command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n ).communicate()\n\n remove_orig_file = f\"rm -f {downloaded_name}.mp4\"\n subprocess.Popen(\n remove_orig_file, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n ).communicate()\n\n\ndef save_video(zargs):\n link, path, start, end, pos_x, pos_y = zargs\n x = int(pos_x * 10000)\n y = int(pos_y * 10000)\n downloaded_name = path.as_posix() + f\"_{x}_{y}\"\n cropped = download(link, downloaded_name, final_name=downloaded_name + \"_final.mp4\")\n if not cropped:\n crop(path, start, end, downloaded_name)\n\n\ndef main(args):\n df = pd.read_csv(args.path)\n links = df.iloc[:, 0][args.start : args.end]\n start_times = df.iloc[:, 1][args.start : args.end]\n end_times = df.iloc[:, 2][args.start : args.end]\n pos_x = df.iloc[:, 3][args.start : args.end]\n pos_y = df.iloc[:, 4][args.start : args.end]\n\n yt_links = [\"https://youtube.com/watch\\?v\\=\" + l for l in links]\n paths = [Path(os.path.join(args.vid_dir, f)) for f in links]\n\n link_path = zip(yt_links, paths, start_times, end_times, pos_x, pos_y)\n with concurrent.futures.ThreadPoolExecutor(args.jobs) as executor:\n results = list(tqdm.tqdm(executor.map(save_video, link_path), total=len(links)))\n\n\nif __name__ == \"__main__\":\n parse = argparse.ArgumentParser(description=\"Download parameters\")\n parse.add_argument(\"--jobs\", type=int, default=1)\n parse.add_argument(\"--path\", type=str, default=\"../../data/audio_visual/avspeech_train.csv\")\n parse.add_argument(\"--vid-dir\", type=str, default=VIDEO_DIR)\n parse.add_argument(\"--start\", type=int, default=0)\n parse.add_argument(\"--end\", type=int, default=10_000)\n args = parse.parse_args()\n main(args)\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.Conv1d"
],
[
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.utils.data.DataLoader",
"torch.nn.MSELoss"
],
[
"torch.mean",
"torch.einsum",
"torch.min",
"torch.gather",
"torch.unsqueeze",
"torch.arange",
"torch.stack",
"scipy.optimize.linear_sum_assignment",
"torch.index_select"
],
[
"torch.stack",
"torch.utils.data._utils.collate.default_collate",
"torch.sum",
"torch.randperm"
],
[
"torch.mean",
"pandas.Series",
"numpy.abs",
"torch.load",
"pandas.DataFrame",
"torch.no_grad"
],
[
"torch.no_grad",
"torch.tensor"
],
[
"torch.randn"
],
[
"torch.jit.script",
"torch.tensor",
"torch.device",
"torch._C._is_tracing",
"torch.nn.functional.pad"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.4",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
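
The PITLossWrapper code in the row above dispatches between an exhaustive search over all source permutations (find_best_perm_factorial) and the Hungarian algorithm (find_best_perm_hungarian) once the pairwise loss matrix is built. Below is a standalone sketch of both strategies, assuming plain MSE as the pairwise loss, a (batch, n_src, time) layout, and hypothetical helper names pit_mse / pit_mse_hungarian; it illustrates the technique rather than reproducing the asteroid API.

from itertools import permutations

import torch
from scipy.optimize import linear_sum_assignment


def pit_mse(est, tgt):
    # est, tgt: (batch, n_src, time); exhaustive search over all n_src! orderings.
    n_src = tgt.shape[1]
    # pw[b, i, j] = mean squared error between estimate i and target j.
    pw = ((est.unsqueeze(2) - tgt.unsqueeze(1)) ** 2).mean(-1)  # (batch, n_src, n_src)
    losses = torch.stack(
        [pw[:, torch.arange(n_src), torch.tensor(p)].mean(-1)
         for p in permutations(range(n_src))],
        dim=1,
    )  # (batch, n_src!)
    return losses.min(dim=1).values.mean()


def pit_mse_hungarian(est, tgt):
    # Same objective, but O(n_src**3) per sample via linear_sum_assignment.
    pw = ((est.unsqueeze(2) - tgt.unsqueeze(1)) ** 2).mean(-1)
    per_sample = []
    for b, cost in enumerate(pw.detach().cpu().numpy()):
        rows, cols = linear_sum_assignment(cost)
        per_sample.append(pw[b, torch.as_tensor(rows), torch.as_tensor(cols)].mean())
    return torch.stack(per_sample).mean()


est = torch.randn(2, 3, 16000, requires_grad=True)
tgt = torch.randn(2, 3, 16000)
assert torch.isclose(pit_mse(est, tgt), pit_mse_hungarian(est, tgt))

As in the wrapper above, the assignment itself is computed on a detached CPU copy of the loss matrix, so gradients still flow through the selected pairwise losses.
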
hanfengzhai/DARPA-FFT | [
"61705c1dcbe7a75a54003db5e8f7db3717e3040c",
"61705c1dcbe7a75a54003db5e8f7db3717e3040c"
] | [
"code/id53.py",
"code/id56.py"
] | [
"import time\nfrom scipy import fftpack\nimport book_format\nbook_format.set_style()\nimport kf_book.kf_internal as kf_internal\nfrom kf_book.kf_internal import DogSimulation\nfrom kf_book import book_plots as book_plots\nimport numpy as np\nfrom matplotlib import pyplot\nimport scipy.io\nimport pandas as pd\nimport pandas_datareader as pdr\nimport seaborn as sns\nfrom pykrige.ok import OrdinaryKriging\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nsys.path.insert(0, '../../results')\n\n\njj = 50\n\nwith open('Lat_new.txt', 'r') as f1:\n data1 = f1.read().split(); floats1 = []\n for elem1 in data1:\n try:\n floats1.append(float(elem1))\n except ValueError:\n pass\n\nlat = np.array(data1, dtype = np.float64);lat = np.array_split(lat, 86)\nx1 = lat\n\nwith open('Long_new.txt', 'r') as f2:\n data2 = f2.read().split(); floats2 = []\n for elem2 in data2:\n try:\n floats2.append(float(elem2))\n except ValueError:\n pass\n \nlongdat = np.array(data2, dtype = np.float64);longdat = np.array_split(longdat, 86)\nx2 = longdat\n\nx = np.linspace(0, 405, 405)\nx_benchmark = np.linspace(0, 405, 405)# 550\nxpred = np.linspace(405, 750, 345)#440 - 550\ny_lat = x1[jj][0:405]\ny_long = x2[jj][0:405]\n# y_benchmark = x1[jj][0:550]\n\ny_fft_lat = fftpack.dct(y_lat, norm=\"ortho\")\ny_fft_lat[5:] = 0\ny_filter_lat = fftpack.idct(y_fft_lat, norm=\"ortho\")\n\ny_fft_long = fftpack.dct(y_long, norm=\"ortho\")\ny_fft_long[5:] = 0\ny_filter_long = fftpack.idct(y_fft_long, norm=\"ortho\")\n\n\nt_lat = time.time()\n\nuk_fourier_lat = OrdinaryKriging(\n x, np.zeros(x.shape), y_filter_lat, variogram_model=\"power\"#, exact_values=False\n)\ny_fft_pred_lat, y_fft_std_lat = uk_fourier_lat.execute(\"grid\", xpred, np.array([0.0]), backend=\"loop\")\n\ntime_fourierkriging_lat = time.time() - t_lat\n\n\nuk_lat = OrdinaryKriging(\n x, np.zeros(x.shape), y_lat, variogram_model=\"power\"#, exact_values=False\n)\ny_pred_lat, y_std_lat = uk_lat.execute(\"grid\", xpred, np.array([0.0]), backend=\"loop\")\n\ntime_kriging_lat = time.time() - t_lat\n\n\nt_long = time.time()\n\nuk_fourier_long = OrdinaryKriging(\n x, np.zeros(x.shape), y_filter_long, variogram_model=\"power\"#, exact_values=False\n)\ny_fft_pred_long, y_fft_std_long = uk_fourier_long.execute(\"grid\", xpred, np.array([0.0]), backend=\"loop\")\n\ntime_fourierkriging_long = time.time() - t_long\n\n\nuk_long = OrdinaryKriging(\n x, np.zeros(x.shape), y_long, variogram_model=\"power\"#, exact_values=False\n)\ny_pred_long, y_std_long = uk_long.execute(\"grid\", xpred, np.array([0.0]), backend=\"loop\")\n\ntime_kriging_long = time.time() - t_long\n\n\ny_pred_lat = np.squeeze(y_pred_lat)\ny_std_lat = np.squeeze(y_std_lat)\ny_fft_pred_lat = np.squeeze(y_fft_pred_lat)\ny_fft_std_lat = np.squeeze(y_fft_std_lat)\n\ny_pred_long = np.squeeze(y_pred_long)\ny_std_long = np.squeeze(y_std_long)\ny_fft_pred_long = np.squeeze(y_fft_pred_long)\ny_fft_std_long = np.squeeze(y_fft_std_long)\n\n\ndat_24_lat = y_fft_pred_lat[135:161]\ndat_26_lat = y_fft_pred_lat[184:207]\ndat_28_lat = y_fft_pred_lat[230:253]\ndat_30_lat = y_fft_pred_lat[276:299]\ndat_2_lat = y_fft_pred_lat[322:345]\n\ndat_24_long = y_fft_pred_long[135:161]\ndat_26_long = y_fft_pred_long[184:207]\ndat_28_long = y_fft_pred_long[230:253]\ndat_30_long = y_fft_pred_long[276:299]\ndat_2_long = y_fft_pred_long[322:345]\n\n# =====================================\n\npred_24_lat = np.mean(dat_24_lat)\npred_26_lat = np.mean(dat_26_lat)\npred_28_lat = np.mean(dat_28_lat)\npred_30_lat = np.mean(dat_30_lat)\npred_2_lat = 
np.mean(dat_2_lat)\n\npred_24_long = np.mean(dat_24_long)\npred_26_long = np.mean(dat_26_long)\npred_28_long = np.mean(dat_28_long)\npred_30_long = np.mean(dat_30_long)\npred_2_long = np.mean(dat_2_long)\n\n# ========SAVE FINAL DATA PREDICTION=========\n\nfinal_pred = [[pred_24_lat, pred_26_lat, pred_28_lat, pred_30_lat, pred_2_lat],[pred_24_long, pred_26_long, pred_28_long, pred_30_long, pred_2_long]]\n\nnp.savetxt(('id'+str(jj)+'.txt'),final_pred)\n",
"import time\nfrom scipy import fftpack\nimport book_format\nbook_format.set_style()\nimport kf_book.kf_internal as kf_internal\nfrom kf_book.kf_internal import DogSimulation\nfrom kf_book import book_plots as book_plots\nimport numpy as np\nfrom matplotlib import pyplot\nimport scipy.io\nimport pandas as pd\nimport pandas_datareader as pdr\nimport seaborn as sns\nfrom pykrige.ok import OrdinaryKriging\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nsys.path.insert(0, '../../results')\n\n\njj = 53\n\nwith open('Lat_new.txt', 'r') as f1:\n data1 = f1.read().split(); floats1 = []\n for elem1 in data1:\n try:\n floats1.append(float(elem1))\n except ValueError:\n pass\n\nlat = np.array(data1, dtype = np.float64);lat = np.array_split(lat, 86)\nx1 = lat\n\nwith open('Long_new.txt', 'r') as f2:\n data2 = f2.read().split(); floats2 = []\n for elem2 in data2:\n try:\n floats2.append(float(elem2))\n except ValueError:\n pass\n \nlongdat = np.array(data2, dtype = np.float64);longdat = np.array_split(longdat, 86)\nx2 = longdat\n\nx = np.linspace(0, 405, 405)\nx_benchmark = np.linspace(0, 405, 405)# 550\nxpred = np.linspace(405, 750, 345)#440 - 550\ny_lat = x1[jj][0:405]\ny_long = x2[jj][0:405]\n# y_benchmark = x1[jj][0:550]\n\ny_fft_lat = fftpack.dct(y_lat, norm=\"ortho\")\ny_fft_lat[5:] = 0\ny_filter_lat = fftpack.idct(y_fft_lat, norm=\"ortho\")\n\ny_fft_long = fftpack.dct(y_long, norm=\"ortho\")\ny_fft_long[5:] = 0\ny_filter_long = fftpack.idct(y_fft_long, norm=\"ortho\")\n\n\nt_lat = time.time()\n\nuk_fourier_lat = OrdinaryKriging(\n x, np.zeros(x.shape), y_filter_lat, variogram_model=\"power\"#, exact_values=False\n)\ny_fft_pred_lat, y_fft_std_lat = uk_fourier_lat.execute(\"grid\", xpred, np.array([0.0]), backend=\"loop\")\n\ntime_fourierkriging_lat = time.time() - t_lat\n\n\nuk_lat = OrdinaryKriging(\n x, np.zeros(x.shape), y_lat, variogram_model=\"power\"#, exact_values=False\n)\ny_pred_lat, y_std_lat = uk_lat.execute(\"grid\", xpred, np.array([0.0]), backend=\"loop\")\n\ntime_kriging_lat = time.time() - t_lat\n\n\nt_long = time.time()\n\nuk_fourier_long = OrdinaryKriging(\n x, np.zeros(x.shape), y_filter_long, variogram_model=\"power\"#, exact_values=False\n)\ny_fft_pred_long, y_fft_std_long = uk_fourier_long.execute(\"grid\", xpred, np.array([0.0]), backend=\"loop\")\n\ntime_fourierkriging_long = time.time() - t_long\n\n\nuk_long = OrdinaryKriging(\n x, np.zeros(x.shape), y_long, variogram_model=\"power\"#, exact_values=False\n)\ny_pred_long, y_std_long = uk_long.execute(\"grid\", xpred, np.array([0.0]), backend=\"loop\")\n\ntime_kriging_long = time.time() - t_long\n\n\ny_pred_lat = np.squeeze(y_pred_lat)\ny_std_lat = np.squeeze(y_std_lat)\ny_fft_pred_lat = np.squeeze(y_fft_pred_lat)\ny_fft_std_lat = np.squeeze(y_fft_std_lat)\n\ny_pred_long = np.squeeze(y_pred_long)\ny_std_long = np.squeeze(y_std_long)\ny_fft_pred_long = np.squeeze(y_fft_pred_long)\ny_fft_std_long = np.squeeze(y_fft_std_long)\n\n\ndat_24_lat = y_fft_pred_lat[135:161]\ndat_26_lat = y_fft_pred_lat[184:207]\ndat_28_lat = y_fft_pred_lat[230:253]\ndat_30_lat = y_fft_pred_lat[276:299]\ndat_2_lat = y_fft_pred_lat[322:345]\n\ndat_24_long = y_fft_pred_long[135:161]\ndat_26_long = y_fft_pred_long[184:207]\ndat_28_long = y_fft_pred_long[230:253]\ndat_30_long = y_fft_pred_long[276:299]\ndat_2_long = y_fft_pred_long[322:345]\n\n# =====================================\n\npred_24_lat = np.mean(dat_24_lat)\npred_26_lat = np.mean(dat_26_lat)\npred_28_lat = np.mean(dat_28_lat)\npred_30_lat = np.mean(dat_30_lat)\npred_2_lat = 
np.mean(dat_2_lat)\n\npred_24_long = np.mean(dat_24_long)\npred_26_long = np.mean(dat_26_long)\npred_28_long = np.mean(dat_28_long)\npred_30_long = np.mean(dat_30_long)\npred_2_long = np.mean(dat_2_long)\n\n# ========SAVE FINAL DATA PREDICTION=========\n\nfinal_pred = [[pred_24_lat, pred_26_lat, pred_28_lat, pred_30_lat, pred_2_lat],[pred_24_long, pred_26_long, pred_28_long, pred_30_long, pred_2_long]]\n\nnp.savetxt(('id'+str(jj)+'.txt'),final_pred)\n"
] | [
[
"scipy.fftpack.idct",
"numpy.linspace",
"numpy.squeeze",
"numpy.array_split",
"numpy.mean",
"numpy.array",
"scipy.fftpack.dct",
"numpy.zeros"
],
[
"scipy.fftpack.idct",
"numpy.linspace",
"numpy.squeeze",
"numpy.array_split",
"numpy.mean",
"numpy.array",
"scipy.fftpack.dct",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
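
Both DARPA-FFT scripts in the row above share one preprocessing idea: low-pass the drifter track by zeroing all but the first few DCT coefficients, then fit an OrdinaryKriging model to the smoothed signal and extrapolate. A minimal sketch of just the DCT filter, with a hypothetical noisy track standing in for the Lat_new.txt data:

import numpy as np
from scipy import fftpack

rng = np.random.default_rng(0)
t = np.linspace(0, 1, 405)
# Hypothetical stand-in for one drifter's latitude series.
y = np.sin(2 * np.pi * 2 * t) + 0.3 * rng.standard_normal(t.size)

coeffs = fftpack.dct(y, norm="ortho")
coeffs[5:] = 0  # keep only the 5 lowest-frequency components, matching y_fft_lat[5:] = 0
y_smooth = fftpack.idct(coeffs, norm="ortho")

Here y_smooth plays the role of y_filter_lat above: it is the signal handed to OrdinaryKriging in place of the raw, noisy track.
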
miyuush/AtCoder | [
"9481f15b69b99f56334a623f5a63dbb5e6359522"
] | [
"contest/20190303_ABC120/D.py"
] | [
"# Union-findを使う\n# 論文検索には(Disjoint Set)\n\n\nfrom scipy.special import comb\n\nn, m = map(int, input().split())\na = []\nb = []\nfor i in range(m):\n a0, b0 = [int(i) for i in input().split()]\n a.append(a0)\n b.append(b0)\n\na.append(b)\nl = len(a)\ncmb = comb(m, 2, exact=True)\nbase = int(cmb / 2) + 1\n\nfor _ in range(m):\n l -= 2\n if l > base:\n print(0)\n elif l == 4:\n print(cmb-2)\n elif l == 2:\n print(cmb-1)\n elif l == 0:\n print(cmb)\n else:\n print(base)\n base += 1"
] | [
[
"scipy.special.comb"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
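
The AtCoder solution above relies on scipy.special.comb(m, 2, exact=True). The exact flag is what keeps the arithmetic safe for contest-sized inputs: the default path returns a float approximation, while exact=True returns an arbitrary-precision Python int, as this small sketch shows.

from scipy.special import comb

print(comb(4, 2))                 # 6.0, float approximation (default)
print(comb(4, 2, exact=True))     # 6, exact Python int
print(comb(100, 50))              # float, rounded to ~16 significant digits
print(comb(100, 50, exact=True))  # exact integer, no precision loss
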
peterorum/ashrae | [
"6527eb71b2102565bb71b402db700b561cea138c"
] | [
"src/001-constant.py"
] | [
"# baseline: constant 0\n# local score 4.668\n# kaggle score 4.69\n\nimport sys # pylint: disable=unused-import\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import mean_squared_error\nfrom time import time\n\nimport os\n\nis_kaggle = os.environ['HOME'] == '/tmp'\n\nzipext = '' if is_kaggle else '.zip'\n\n# load data\ntrain = pd.read_csv(f'../input/train.csv{zipext}')\ntest = pd.read_csv(f'../input/test.csv{zipext}')\n\n#-------- main\n\nstart_time = time()\n\ntarget = 'meter_reading'\n\nresult = 0\n\ntrain['predicted'] = result\n\nscore = np.sqrt(mean_squared_error(np.log1p(train[target]), np.log1p(train.predicted)))\n\nprint('score', score)\n\ntest[target] = result\n\npredictions = test[['row_id', target]]\n\npredictions.to_csv('submission.csv', index=False)\n\nprint('%.0f mins' % ((time() - start_time) / 60))\n"
] | [
[
"numpy.log1p",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
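
The score line in the ashrae baseline above is RMSLE: root mean squared error computed in log1p space, which is why the local and kaggle scores quoted in its header comment are directly comparable. A minimal sketch with hypothetical meter readings:

import numpy as np
from sklearn.metrics import mean_squared_error

def rmsle(y_true, y_pred):
    # RMSE on log1p-transformed values, matching the script's score computation.
    return np.sqrt(mean_squared_error(np.log1p(y_true), np.log1p(y_pred)))

y_true = np.array([0.0, 10.0, 100.0])        # hypothetical meter readings
print(rmsle(y_true, np.zeros_like(y_true)))  # score of the constant-0 prediction
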
Danielhp95/Regym | [
"f0f0be0ad23bf1a3410ecd9ed9b8025947d6080a",
"f0f0be0ad23bf1a3410ecd9ed9b8025947d6080a",
"f0f0be0ad23bf1a3410ecd9ed9b8025947d6080a"
] | [
"regym/rl_algorithms/TQL/repeated_update_q_learning.py",
"regym/training_schemes/psro.py",
"regym/networks/bodies.py"
] | [
"import numpy as np\n\n\nclass RepeatedUpdateQLearningAlgorithm():\n '''\n Repeated Update Q Learning (RUQL) as introduced in:\n \"Addressing the Policy Bias of Q-Learning by Repeating Updates\" - Sherief Abdallah, Michael Kaisers\n '''\n def __init__(self, state_space_size, action_space_size, hashing_function, discount_factor, learning_rate, temperature):\n self.Q_table = np.zeros((state_space_size, action_space_size), dtype=np.float64)\n self.learning_rate = learning_rate\n self.hashing_function = hashing_function\n self.temperature = temperature\n self.discount_factor = discount_factor\n\n def update_q_table(self, s, a, r, succ_s):\n s, succ_s = self.hashing_function(s), self.hashing_function(succ_s)\n probability_taking_action_a = self.boltzman_exploratory_policy_from_state(s)[a]\n x = (1 - self.learning_rate)**(1 / probability_taking_action_a)\n self.Q_table[s, a] = x * self.Q_table[s, a] + (1 - x) * (r + self.discount_factor * max(self.Q_table[succ_s, :]))\n\n def boltzman_exploratory_policy_from_state(self, s):\n exp_q_values = np.exp([self.Q_table[s, i] / self.temperature for i in range(self.Q_table.shape[1])])\n normalizing_constant = sum(exp_q_values)\n return np.divide(exp_q_values, normalizing_constant)\n\n def find_moves(self, state, exploration):\n state = self.hashing_function(state)\n if exploration:\n p = self.boltzman_exploratory_policy_from_state(state)\n return np.random.choice(range(self.Q_table.shape[1]), p=p)\n else:\n optimal_moves = np.argwhere(self.Q_table[state, :] == np.amax(self.Q_table[state, :]))\n return np.random.choice(optimal_moves.flatten().tolist())\n",
"'''\nImplementation of Policy-Spaced Response Oracles (PSRO) as first introduced in Lanctot et al 2017:\nhttp://papers.nips.cc/paper/7007-a-unified-game-theoretic-approach-to-multiagent-reinforcement-learning\n\nTODO: difference between PSRO which takes 3 separate stages and our method, which is an online method.\n'''\nfrom typing import Callable, List\nfrom copy import deepcopy\nimport dill\nimport logging\nimport time\nfrom itertools import product\n\nimport torch\nimport numpy as np\n\nfrom regym.game_theory import solve_zero_sum_game\nfrom regym.util import play_multiple_matches\nfrom regym.environments import generate_task, Task, EnvType\nfrom regym.rl_loops import Trajectory\n\n\ndef default_meta_game_solver(winrate_matrix: np.ndarray):\n return solve_zero_sum_game(winrate_matrix)[0].reshape((-1))\n\n\nclass PSRONashResponse():\n\n def __init__(self,\n task: Task,\n meta_game_solver: Callable = default_meta_game_solver,\n threshold_best_response: float = 0.7,\n benchmarking_episodes: int = 10,\n match_outcome_rolling_window_size: int = 10):\n '''\n :param task: Multiagent task\n :param meta_game_solver: Function which takes a meta-game and returns a probability\n distribution over the policies in the meta-game.\n Default uses maxent-Nash equilibrium for the logodds transformation\n of the winrate_matrix metagame.\n :param threshold_best_response: Winrate thrshold after which the agent being\n trained is to converge towards a best response\n againts the current meta-game solution.\n :param benchmarking_episodes: Number of episodes that will be used to compute winrates\n to fill the metagame.\n :param match_outcome_rolling_window_size: Number of episodes that will be used to\n decide whether the currently training agent\n has converged to a best response.\n '''\n self.name = f'PSRO(M=maxentNash,O=BestResponse(wr={threshold_best_response},ws={match_outcome_rolling_window_size})'\n self.logger = logging.getLogger(self.name)\n self.logger.setLevel(logging.INFO)\n self.check_parameter_validity(task, threshold_best_response,\n benchmarking_episodes,\n match_outcome_rolling_window_size)\n self.task = task\n\n self.meta_game_solver = meta_game_solver\n self.meta_game: np.ndarray = None\n self.meta_game_solution: np.ndarray = None\n self.menagerie = []\n\n self.threshold_best_response = threshold_best_response\n self.match_outcome_rolling_window = []\n self.match_outcome_rolling_window_size = match_outcome_rolling_window_size\n\n self.benchmarking_episodes = benchmarking_episodes\n\n self.statistics = [self.IterationStatistics(0, 0, 0, [0], np.nan)]\n\n def opponent_sampling_distribution(self, menagerie: List['Agent'], training_agent: 'Agent'):\n '''\n :param menagerie: archive of agents selected by the curator and the potential opponents\n :param training_agent: Agent currently being trained\n '''\n if len(menagerie) == 0 and len(self.menagerie) == 0:\n self.init_meta_game_and_solution(training_agent)\n sampled_index = np.random.choice([i for i in range(len(self.menagerie))],\n p=self.meta_game_solution)\n self.statistics[-1].menagerie_picks[sampled_index] += 1\n return [self.menagerie[sampled_index]]\n\n def init_meta_game_and_solution(self, training_agent):\n self.add_agent_to_menagerie(training_agent)\n self.meta_game = np.array([[0.5]])\n self.meta_game_solution = np.array([1.0])\n\n def curator(self, menagerie: List['Agent'], training_agent: 'Agent',\n episode_trajectory: Trajectory,\n training_agent_index: int,\n candidate_save_path: str) -> List['Agent']:\n '''\n :param menagerie: 
archive of agents selected by the curator and the potential opponents\n :param training_agent: Agent currently being trained\n :returns: menagerie to be used in the next training episode.\n '''\n self.statistics[-1].total_elapsed_episodes += 1\n self.statistics[-1].current_iteration_elapsed_episodes += 1\n\n self.update_rolling_winrates(episode_trajectory, training_agent_index)\n if self.has_policy_converged():\n self.add_agent_to_menagerie(training_agent, candidate_save_path)\n self.update_meta_game()\n self.update_meta_game_solution()\n self.match_outcome_rolling_window = []\n self.statistics += [self.create_new_iteration_statistics(self.statistics[-1])]\n self.statistics[-1].meta_game_solution = self.meta_game_solution\n return self.menagerie\n\n def has_policy_converged(self):\n current_winrate = (sum(self.match_outcome_rolling_window) \\\n / self.match_outcome_rolling_window_size)\n return current_winrate >= self.threshold_best_response\n\n def update_rolling_winrates(self, episode_trajectory: Trajectory, training_agent_index: int):\n victory = int(episode_trajectory.winner == training_agent_index)\n if len(self.match_outcome_rolling_window) >= self.match_outcome_rolling_window_size:\n self.match_outcome_rolling_window.pop(0)\n self.match_outcome_rolling_window.append(victory)\n\n def update_meta_game_solution(self, update=False):\n self.logger.info(f'START: Solving metagame. Size: {len(self.menagerie)}')\n start_time = time.time()\n self.meta_game_solution = self.meta_game_solver(self.meta_game)\n time_elapsed = time.time() - start_time\n self.statistics[-1].time_elapsed_meta_game_solution = time_elapsed\n self.logger.info(f'FINISH: Solving metagame. time: {time_elapsed}')\n return self.meta_game_solution\n\n def update_meta_game(self):\n self.logger.info(f'START: updating metagame. Size: {len(self.menagerie)}')\n start_time = time.time()\n number_old_policies = len(self.menagerie) - 1\n updated_meta_game = np.full(((len(self.menagerie)), len(self.menagerie)),\n np.nan)\n\n # Filling the matrix with already-known values.\n updated_meta_game[:number_old_policies, :number_old_policies] = self.meta_game\n\n self.fill_meta_game_missing_entries(self.menagerie, updated_meta_game, self.benchmarking_episodes,\n self.task)\n self.meta_game = updated_meta_game\n time_elapsed = time.time() - start_time\n self.statistics[-1].time_elapsed_meta_game_update = time_elapsed\n self.logger.info(f'FINISH: updating metagame. time: {time_elapsed}')\n return updated_meta_game\n\n def fill_meta_game_missing_entries(self, policies: List,\n updated_meta_game: np.ndarray,\n benchmarking_episodes: int, task: Task):\n indices_to_fill = product(range(updated_meta_game.shape[0]),\n [updated_meta_game.shape[0] - 1])\n for i, j in indices_to_fill:\n # TODO: maybe use regym.evaluation. 
benchmark on tasks?\n if i == j: updated_meta_game[j, j] = 0.5\n else:\n winrate_estimate = self.estimate_winrate(task, policies[i],\n policies[j], benchmarking_episodes)\n # Because we are asumming a symmetrical zero-sum game.\n updated_meta_game[i, j] = winrate_estimate\n updated_meta_game[j, i] = 1 - winrate_estimate\n return updated_meta_game\n\n def estimate_winrate(self, task: Task, policy_1: 'Agent', policy_2: 'Agent',\n benchmarking_episodes: int) -> float:\n '''\n ASSUMPTION: Task is a 2-player, non-symmetrical game.\n Thus the :param: policies need to be benchmarked on both positions.\n '''\n first_position_winrates = play_multiple_matches(\n task=task, agent_vector=[policy_1, policy_2],\n n_matches=(benchmarking_episodes // 2))\n second_position_winrates = play_multiple_matches(\n task=task, agent_vector=[policy_2, policy_1],\n n_matches=(benchmarking_episodes // 2))\n policy_1_overall_winrate = (first_position_winrates[0] +\n second_position_winrates[1]) / 2\n return policy_1_overall_winrate\n\n def add_agent_to_menagerie(self, training_agent, candidate_save_path=None):\n if candidate_save_path: torch.save(training_agent, candidate_save_path)\n cloned_agent = deepcopy(training_agent)\n cloned_agent.training_agent = False\n self.menagerie.append(cloned_agent)\n\n def create_new_iteration_statistics(self, last_iteration_statistics):\n return self.IterationStatistics(len(self.statistics), last_iteration_statistics.total_elapsed_episodes,\n 0, [0] * len(self.menagerie),\n self.meta_game_solution)\n\n def check_parameter_validity(self, task, threshold_best_response,\n benchmarking_episodes,\n match_outcome_rolling_window_size):\n if task.env_type == EnvType.SINGLE_AGENT:\n raise ValueError('Task provided: {task.name} is singleagent. PSRO is a multiagent ' +\n 'meta algorithm. It only opperates on multiagent tasks')\n if not(0 <= threshold_best_response <= 1):\n raise ValueError('Parameter \\'threshold_best_response\\' represents ' +\n 'a winrate (a probability). It must lie between [0, 1]')\n if not(0 < benchmarking_episodes):\n raise ValueError('Parameter \\'benchmarking_episodes\\' must be strictly positive')\n if not(0 < match_outcome_rolling_window_size):\n raise ValueError('Parameter \\'benchmarking_episodes\\' corresponds to ' +\n 'the lenght of a list. It must be strictly positive')\n\n def __repr__(self):\n return f'{self.name}. Meta-game size: {len(self.menagerie)}.'\n\n class IterationStatistics():\n def __init__(self, iteration_number: int,\n total_elapsed_episodes: int,\n current_iteration_elapsed_episodes: int,\n menagerie_picks: List[int],\n meta_game_solution: np.ndarray):\n '''\n Data class containing information for each of the PSRO iterations\n '''\n self.iteration_number = iteration_number\n self.total_elapsed_episodes = total_elapsed_episodes\n self.current_iteration_elapsed_episodes = current_iteration_elapsed_episodes\n self.menagerie_picks = menagerie_picks\n self.meta_game_solution = meta_game_solution\n self.time_elapsed_meta_game_solution = np.nan\n self.time_elapsed_meta_game_update = np.nan\n\n def __repr__(self):\n s = \\\n f'''\n Iteration: {self.iteration_number}\n Total elapsed episodes: {self.total_elapsed_episodes}\n Current iteration elapsed episodes: {self.current_iteration_elapsed_episodes}\n Menagerie picks: {self.menagerie_picks}\n Time elapsed M: {self.time_elapsed_meta_game_solution}\n Time elapsed Winrate matrix: {self.time_elapsed_meta_game_update}\n '''\n return s\n",
"#######################################################################\n# Copyright (C) 2017 Shangtong Zhang([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\nfrom typing import List, Callable, Iterable, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom regym.networks.utils import layer_init, layer_init_lstm\nfrom regym.networks.utils import convolutional_layer_output_dimensions, compute_convolutional_dimension_transforms\nfrom regym.networks.utils import create_convolutional_layers\n\n\nclass SequentialBody(nn.Module):\n '''\n A wrapper around torch.nn.Sequential so that it exposes property 'feature_dim'\n '''\n\n def __init__(self, bodies: Iterable[nn.Module]):\n super().__init__()\n self.sequence = nn.Sequential(*bodies)\n self.feature_dim = self.sequence[-1].feature_dim\n\n def forward(self, x: torch.Tensor):\n return self.sequence(x)\n\n\nclass Convolutional2DBody(nn.Module):\n def __init__(self, input_shape: Tuple[int, int],\n channels: List[int], kernel_sizes: List[int],\n paddings: List[int], strides: List[int],\n final_feature_dim: int = 256,\n residual_connections: List[Tuple[int, int]] = [],\n use_batch_normalization=False,\n gating_function: Callable = F.relu):\n '''\n NOTE: The output comes from a fully connected layer, not a convolution.\n\n :param input_shape: (Height x Width) dimensions of input tensors\n :param channels: List with number of channels for each convolution\n :param kernel_sizes: List of 'k' the size of the square kernel sizes for each convolution\n :param paddings: List with square paddings 'p' for each convolution\n :param strides: List with square stridings 's' for each convolution\n :param final_feature_dim: Size of the 1D fully connected layer that happens\n after all convolutions.\n :param residual_connections: (l1, l2) tuples denoting that output\n from l1 should be added to input of l2\n :param use_batch_normalization: Whether to use BatchNorm2d after each convolution\n :param gating_function: Gating function to use after each convolution\n '''\n super().__init__()\n self.check_input_validity(channels, kernel_sizes, paddings, strides, final_feature_dim)\n self.gating_function = gating_function\n height_in, width_in = input_shape\n\n self.dimensions = compute_convolutional_dimension_transforms(\n height_in, width_in, channels, kernel_sizes, paddings, strides)\n\n convs = self.layer_connections(self.dimensions, residual_connections,\n i_in=0, i_max=(len(self.dimensions) - 1), # check i_max is correct\n Cs=channels, Ks=kernel_sizes,\n Ps=paddings, Ss=strides,\n use_batch_normalization=use_batch_normalization)\n self.convolutions = nn.ModuleList(convs)\n\n output_height, output_width = self.dimensions[-1]\n\n '''\n Creates the final layer of the convolutional body, which transforms the\n final convolution's feature map into a fully connected layer, to make it\n easier to stich together with other layers\n '''\n flattened_input = output_height * output_width * channels[-1]\n self.final_fc_layer = nn.Linear(flattened_input, final_feature_dim)\n self.feature_dim = final_feature_dim\n\n def forward(self, x: torch.Tensor):\n conv_map = x\n for convolution in self.convolutions:\n conv_map = self.gating_function(convolution(conv_map))\n # Without start_dim, we are flattening over the entire batch!\n flattened_conv_map = conv_map.flatten(start_dim=1)\n flat_embedding = 
self.gating_function(self.final_fc_layer(flattened_conv_map))\n return flat_embedding\n\n def check_input_validity(self, channels, kernel_sizes, paddings, strides, final_feature_dim):\n if len(channels) < 2: raise ValueError('At least 2 channels must be specified')\n if len(kernel_sizes) != (len(channels) - 1):\n raise ValueError(f'{len(kernel_sizes)} kernel_sizes were specified, but exactly {len(channels) -1} are required')\n if len(kernel_sizes) != (len(channels) - 1):\n raise ValueError(f'{len(kernel_sizes)} kernel_sizes were specified, but exactly {len(channels) -1} are required')\n if len(paddings) != (len(channels) - 1):\n raise ValueError(f'{len(paddings)} paddings were specified, but exactly {len(channels) -1} are required')\n if len(strides) != (len(channels) - 1):\n raise ValueError(f'{len(strides)} strides were specified, but exactly {len(channels) -1} are required')\n if final_feature_dim <= 0:\n raise ValueError('Param final_feature_dim corresponds to the number '\n 'of neurons to have on the final output layer of a Convolutional2DBody. '\n f'It must be positive (i.e = 256). Given: {final_feature_dim}')\n\n\n def layer_connections(self, dimensions: List[Tuple[int, int]],\n residual_connections: List[Tuple[int, int]],\n i_in, i_max,\n Cs, Ks, Ps, Ss,\n use_batch_normalization) -> List[nn.Module]:\n if i_in == i_max: return []\n if residual_connections == []:\n return [create_convolutional_layers(Cs[i_in:], Ks[i_in:], Ps[i_in:],\n Ss[i_in:], use_batch_normalization)]\n l_in, l_out = residual_connections[0]\n if l_in == i_in: # Start of residual block\n length = slice(l_in, l_out+1)\n res = ConvolutionalResidualBlock(dimensions[l_in],\n Cs[length], Ks[length], Ps[length], Ss[length],\n use_batch_normalization)\n return [res] + self.layer_connections(dimensions, residual_connections[1:],\n l_out, i_max, Cs, Ks, Ps, Ss, use_batch_normalization)\n if l_in > i_in: # Start of non-residual block\n length = slice(i_in, l_in + 1)\n con = create_convolutional_layers(Cs[length], Ks[length],\n Ps[length], Ss[length],\n use_batch_normalization)\n return [con] + self.layer_connections(dimensions, residual_connections,\n l_in, i_max, Cs, Ks, Ps, Ss, use_batch_normalization)\n\n\nclass ConvolutionalResidualBlock(nn.Module):\n\n def __init__(self, input_shape: Tuple[int, int],\n channels: List[int], kernel_sizes: List[int],\n paddings: List[int], strides: List[int],\n use_batch_normalization=False,\n gating_function: Callable = F.relu):\n # TODO: document that a residual block cannot have stride / padding bigger than 1.\n # or at least, Regym does not support it.\n super().__init__()\n self.use_1x1conv = channels[0] != channels[-1]\n if self.use_1x1conv:\n self.residual_conv = nn.Conv2d(channels[0], channels[-1], kernel_size=1)\n\n height_in, width_in = input_shape\n self.dimensions = compute_convolutional_dimension_transforms(\n height_in, width_in, channels, kernel_sizes, paddings, strides)\n self.convolutions = create_convolutional_layers(\n channels, kernel_sizes, paddings, strides, use_batch_normalization)\n\n self.gating_function = gating_function\n output_height, output_width = self.dimensions[-1]\n self.feature_dim = output_height * output_width * channels[-1]\n\n def forward(self, x):\n x2 = x\n for convolution in self.convolutions:\n x2 = self.gating_function(convolution(x2))\n if self.use_1x1conv: x = self.residual_conv(x)\n return x + x2\n\n\nclass FCBody(nn.Module):\n def __init__(self, state_dim, hidden_units=[64, 64], gate=F.leaky_relu):\n super(FCBody, self).__init__()\n dims = 
[state_dim] + hidden_units\n self.layers = nn.ModuleList([layer_init(nn.Linear(dim_in, dim_out))\n for dim_in, dim_out\n in zip(dims[:-1], dims[1:])])\n self.gate = gate\n self.feature_dim = dims[-1]\n\n def forward(self, x):\n for layer in self.layers:\n x = self.gate(layer(x))\n return x\n\n\nclass LSTMBody(nn.Module):\n def __init__(self, state_dim, hidden_units=[256], gate=F.leaky_relu):\n super(LSTMBody, self).__init__()\n dims = (state_dim, ) + hidden_units\n # Consider future cases where we may not want to initialize the LSTMCell(s)\n self.layers = nn.ModuleList([layer_init_lstm(nn.LSTMCell(dim_in, dim_out)) for dim_in, dim_out in zip(dims[:-1], dims[1:])])\n self.feature_dim = dims[-1]\n self.gate = gate\n\n def forward(self, x):\n '''\n :param x: input to LSTM cells. Structured as (input, (hidden_states, cell_states)).\n hidden_states: list of hidden_state(s) one for each self.layers.\n cell_states: list of hidden_state(s) one for each self.layers.\n '''\n x, (hidden_states, cell_states) = x\n next_hstates, next_cstates = [], []\n for idx, (layer, hx, cx) in enumerate(zip(self.layers, hidden_states, cell_states) ):\n batch_size = x.size(0)\n if hx.size(0) == 1: #then we have just resetted the values, we need to expand those:\n hx = torch.cat( [hx]*batch_size, dim=0)\n cx = torch.cat( [cx]*batch_size, dim=0)\n elif hx.size(0) != batch_size:\n raise NotImplemented(\"Sizes of the hidden states and the inputs do not coincide.\")\n\n nhx, ncx = layer(x, (hx, cx) )\n next_hstates.append(nhx)\n next_cstates.append(ncx)\n # Consider not applying activation functions on last layer's output\n if self.gate is not None:\n x = self.gate(nhx)\n\n return x, (next_hstates, next_cstates)\n\n def get_reset_states(self, cuda=False):\n hidden_states, cell_states = [], []\n for layer in self.layers:\n h = torch.zeros(1,layer.hidden_size)\n if cuda:\n h = h.cuda()\n hidden_states.append(h)\n cell_states.append(h)\n return (hidden_states, cell_states)\n\n\nclass DummyBody(nn.Module):\n def __init__(self, state_dim):\n super(DummyBody, self).__init__()\n self.feature_dim = state_dim\n\n def forward(self, x):\n return x\n"
] | [
[
"numpy.amax",
"numpy.zeros",
"numpy.divide"
],
[
"numpy.array",
"torch.save"
],
[
"torch.nn.Sequential",
"torch.zeros",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.LSTMCell",
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
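
The RUQL update in repeated_update_q_learning.py above counters Q-learning's policy bias by raising the retention factor (1 - learning_rate) to the power 1 / pi(a|s): the less likely an action is under the Boltzmann policy, the closer the update gets to fully overwriting the old estimate. A toy sketch of one update step, with hypothetical MDP sizes and a hypothetical transition:

import numpy as np

Q = np.zeros((4, 2))                   # hypothetical: 4 states, 2 actions
lr, gamma, temperature = 0.1, 0.99, 1.0

def boltzmann(q_row, tau):
    # Softmax over one row of the Q-table, as in boltzman_exploratory_policy_from_state.
    e = np.exp(q_row / tau)
    return e / e.sum()

s, a, r, succ_s = 0, 1, 1.0, 2         # one hypothetical (s, a, r, s') transition
p_a = boltzmann(Q[s], temperature)[a]
x = (1 - lr) ** (1 / p_a)              # retention factor shrinks as pi(a|s) shrinks
Q[s, a] = x * Q[s, a] + (1 - x) * (r + gamma * Q[succ_s].max())

Each such step is roughly equivalent to repeating the standard Q-learning update 1 / pi(a|s) times, which is the "Repeating Updates" idea in the paper cited by the class docstring.
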
PandoraLS/WG-WaveNet | [
"5f27e61cc4d3554af8c16fa35345831099b703e8"
] | [
"model/loss.py"
] | [
"import torch\nimport librosa\nimport numpy as np\nimport torch.nn.functional as F\nfrom hparams import hparams as hps\nfrom utils.util import to_arr, mode\n\n\nclass Loss(torch.nn.Module):\n\tdef __init__(self):\n\t\tsuper(Loss, self).__init__()\n\t\tself.d = 2*hps.sigma*hps.sigma\n\t\tself.loss = MultiResolutionSTFTLoss(hps.fft_sizes, hps.hop_sizes,\n\t\t\t\t\t\t\t\t\t\t\thps.win_lengths, hps.mel_scales)\n\n\tdef forward(self, model_output, p_wavs = None, r_wavs = None):\n\t\t# zloss\n\t\tz, log_s_list, log_w_list = model_output\n\t\tlog_s_total = 0\n\t\tlog_w_total = 0\n\t\tfor i, log_s in enumerate(log_s_list):\n\t\t\tlog_s_total += torch.sum(log_s)\n\t\t\tlog_w_total += torch.sum(log_w_list[i])\n\t\tzloss = torch.sum(z*z)/self.d-log_s_total-log_w_total\n\t\tzloss /= (z.size(0)*z.size(1)*z.size(2))\n\t\t\n\t\t# sloss\n\t\tsloss = self.loss(p_wavs, r_wavs) if p_wavs is not None else 0*zloss\n\n\t\treturn zloss+sloss, zloss, sloss\n\n\nclass MultiResolutionSTFTLoss(torch.nn.Module):\n\t# ref: https://github.com/kan-bayashi/ParallelWaveGAN\n\t\"\"\"Multi resolution STFT loss module.\"\"\"\n\tdef __init__(self,\n\t\t\t\t fft_sizes=[1024, 2048, 512],\n\t\t\t\t hop_sizes=[120, 240, 50],\n\t\t\t\t win_lengths=[600, 1200, 240],\n\t\t\t\t mel_scales=[1, 1, 1],\n\t\t\t\t window=\"hann_window\"):\n\t\t\"\"\"Initialize Multi resolution STFT loss module.\n\n\t\tArgs:\n\t\t\tfft_sizes (list): List of FFT sizes.\n\t\t\thop_sizes (list): List of hop sizes.\n\t\t\twin_lengths (list): List of window lengths.\n\t\t\twindow (str): Window function type.\n\n\t\t\"\"\"\n\t\tsuper(MultiResolutionSTFTLoss, self).__init__()\n\t\tassert len(fft_sizes) == len(hop_sizes) == len(win_lengths)\n\t\tself.stft_losses = torch.nn.ModuleList()\n\t\tself.bases = []\n\t\tfor fs, ss, wl, sc in zip(fft_sizes, hop_sizes, win_lengths, mel_scales):\n\t\t\tself.stft_losses += [STFTLoss(fs, ss, wl, window)]\n\t\t\tb = librosa.filters.mel(hps.sample_rate, fs, n_mels = hps.num_mels*sc, fmax = hps.fmax).T\n\t\t\tself.bases += [mode(torch.Tensor(b))]\n\n\tdef forward(self, x, y):\n\t\t\"\"\"Calculate forward propagation.\n\n\t\tArgs:\n\t\t\tx (Tensor): Predicted signal (B, T).\n\t\t\ty (Tensor): Groundtruth signal (B, T).\n\n\t\tReturns:\n\t\t\tTensor: Multi resolution spectral convergence loss value.\n\t\t\tTensor: Multi resolution log spectral loss value.\n\n\t\t\"\"\"\n\t\tsc_loss = 0.0\n\t\tspec_loss = 0.0\n\t\tfor f, b in zip(self.stft_losses, self.bases):\n\t\t\tsc_l, spec_l = f(x, y, b)\n\t\t\tsc_loss += sc_l\n\t\t\tspec_loss += spec_l\n\t\tsc_loss /= len(self.stft_losses)\n\t\tspec_loss /= len(self.stft_losses)\n\n\t\treturn sc_loss+spec_loss\n\n\nclass STFTLoss(torch.nn.Module):\n\t\"\"\"STFT loss module.\"\"\"\n\n\tdef __init__(self, fft_size=1024, shift_size=120, win_length=600, window=\"hann_window\"):\n\t\t\"\"\"Initialize STFT loss module.\"\"\"\n\t\tsuper(STFTLoss, self).__init__()\n\t\tself.fft_size = fft_size\n\t\tself.shift_size = shift_size\n\t\tself.win_length = win_length\n\t\tself.window = mode(getattr(torch, window)(win_length))\n\n\tdef forward(self, x, y, b):\n\t\t\"\"\"Calculate forward propagation.\n\n\t\tArgs:\n\t\t\tx (Tensor): Predicted signal (B, T).\n\t\t\ty (Tensor): Groundtruth signal (B, T).\n\t\t\tb (Tensor): Mel basis (fft_size//2+1, num_mels).\n\n\t\tReturns:\n\t\t\tTensor: Spectral convergence loss value.\n\t\t\tTensor: Log STFT magnitude loss value.\n\n\t\t\"\"\"\n\t\tx_mag, x_mel = stft(x, self.fft_size, self.shift_size, self.win_length, self.window, b)\n\t\ty_mag, y_mel = stft(y, 
self.fft_size, self.shift_size, self.win_length, self.window, b)\n\t\tsc_loss = spec_loss = 0\n\t\tif hps.mag:\n\t\t\th = x_mag.size(2)*2*hps.fmax//hps.sample_rate if hps.sample_rate >= 2*hps.fmax else x_mag.size(2)\n\t\t\tx_mag_ = x_mag[:, :, :h]\n\t\t\ty_mag_ = y_mag[:, :, :h]\n\t\t\tsc_loss += torch.norm((y_mag_-x_mag_), p = \"fro\")/torch.norm(y_mag_, p = \"fro\")\n\t\t\tspec_loss += torch.nn.L1Loss()(torch.log(x_mag_), torch.log(y_mag_))\n\t\t\tif h < x_mag.size(2):\n\t\t\t\tx_mag_m = x_mag[:, :, h:].mean(1)\n\t\t\t\ty_mag_m = y_mag[:, :, h:].mean(1)\n\t\t\t\tsc_loss += torch.norm((y_mag_m-x_mag_m), p = \"fro\")/torch.norm(y_mag_m, p = \"fro\")\n\t\t\t\tspec_loss += torch.nn.L1Loss()(torch.log(x_mag_m), torch.log(y_mag_m))\n\t\tif hps.mel:\n\t\t\tsc_loss += torch.norm((y_mel-x_mel), p = \"fro\")/torch.norm(y_mel, p = \"fro\")\n\t\t\tspec_loss += torch.nn.L1Loss()(torch.log(x_mel), torch.log(y_mel))\n\t\ts = int(hps.mag)+int(hps.mel)\n\t\tif s == 0:\n\t\t\tprint('Error: hps.mag and hps.mel are both set as False.')\n\t\t\texit()\n\t\treturn sc_loss/s, spec_loss/s\n\n\ndef stft(x, fft_size, hop_size, win_length, window, b):\n\t\"\"\"Perform STFT and convert to magnitude spectrogram.\n\n\tArgs:\n\t\tx (Tensor): Input signal tensor (B, T).\n\t\tfft_size (int): FFT size.\n\t\thop_size (int): Hop size.\n\t\twin_length (int): Window length.\n\t\twindow (str): Window function type.\n\t\tb (Tensor): Mel basis (fft_size//2+1, num_mels).\n\n\tReturns:\n\t\tTensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1).\n\n\t\"\"\"\n\tx_stft = torch.stft(x, fft_size, hop_size, win_length, window)\n\treal = x_stft[..., 0]\n\timag = x_stft[..., 1]\n\n\t# NOTE(kan-bayashi): clamp is needed to avoid nan or inf\n\tmag = torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1)\n\treturn mag, torch.clamp(torch.matmul(mag, b), min = 1e-7**0.5)\n\n"
] | [
[
"torch.norm",
"torch.Tensor",
"torch.nn.ModuleList",
"torch.sum",
"torch.matmul",
"torch.log",
"torch.clamp",
"torch.nn.L1Loss",
"torch.stft"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
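
Note: the STFTLoss module in WG-WaveNet's loss.py combines a spectral-convergence term (a Frobenius-norm ratio) with an L1 loss on log magnitudes. A minimal sketch of those two terms, assuming a recent PyTorch where torch.stft accepts return_complex=True (the file above uses the older real-valued stft output); sizes and signals are made up:

import torch

def stft_mag(x, fft_size=1024, hop=256, win=1024):
    window = torch.hann_window(win)
    spec = torch.stft(x, fft_size, hop, win, window, return_complex=True)
    return torch.clamp(spec.abs(), min=1e-7)  # clamp avoids log(0) / divide-by-zero

x = torch.randn(2, 16000)  # stand-in for predicted waveforms (B, T)
y = torch.randn(2, 16000)  # stand-in for reference waveforms (B, T)
x_mag, y_mag = stft_mag(x), stft_mag(y)
sc_loss = torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro")
mag_loss = torch.nn.L1Loss()(torch.log(x_mag), torch.log(y_mag))
loss = sc_loss + mag_loss
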
kanga333/PyAthena | [
"487baa66ae203c3541d37191600f1f3219a2e1ac"
] | [
"pyathena/util.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport functools\nimport logging\nimport threading\nimport re\nimport uuid\n\nimport tenacity\nfrom past.builtins import xrange\nfrom tenacity import (after_log, retry_if_exception,\n stop_after_attempt, wait_exponential)\n\nfrom pyathena import DataError, OperationalError\nfrom pyathena.model import AthenaCompression\n\n_logger = logging.getLogger(__name__)\n\nPATTERN_OUTPUT_LOCATION = re.compile(r'^s3://(?P<bucket>[a-zA-Z0-9.\\-_]+)/(?P<key>.+)$')\n\n\ndef parse_output_location(output_location):\n match = PATTERN_OUTPUT_LOCATION.search(output_location)\n if match:\n return match.group('bucket'), match.group('key')\n else:\n raise DataError('Unknown `output_location` format.')\n\n\ndef get_chunks(df, chunksize=None):\n rows = len(df)\n if rows == 0:\n return\n if chunksize is None:\n chunksize = rows\n elif chunksize <= 0:\n raise ValueError('Chunk size argument must be greater than zero')\n\n chunks = int(rows / chunksize) + 1\n for i in xrange(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, rows)\n if start_i >= end_i:\n break\n yield df[start_i:end_i]\n\n\ndef reset_index(df, index_label=None):\n df.index.name = index_label if index_label else 'index'\n try:\n df.reset_index(inplace=True)\n except ValueError as e:\n raise ValueError('Duplicate name in index/columns: {0}'.format(e))\n\n\ndef as_pandas(cursor, coerce_float=False):\n from pandas import DataFrame\n names = [metadata[0] for metadata in cursor.description]\n return DataFrame.from_records(cursor.fetchall(), columns=names,\n coerce_float=coerce_float)\n\n\ndef to_sql_type_mappings(col):\n import pandas as pd\n col_type = pd._lib.infer_dtype(col, skipna=True)\n if col_type == 'datetime64' or col_type == 'datetime':\n return 'TIMESTAMP'\n elif col_type == 'timedelta':\n return 'INT'\n elif col_type == \"timedelta64\":\n return 'BIGINT'\n elif col_type == 'floating':\n if col.dtype == 'float32':\n return 'FLOAT'\n else:\n return 'DOUBLE'\n elif col_type == 'integer':\n if col.dtype == 'int32':\n return 'INT'\n else:\n return 'BIGINT'\n elif col_type == 'boolean':\n return 'BOOLEAN'\n elif col_type == \"date\":\n return 'DATE'\n elif col_type == 'bytes':\n return 'BINARY'\n elif col_type in ['complex', 'time']:\n raise ValueError('{0} datatype not supported'.format(col_type))\n return 'STRING'\n\n\ndef to_sql(df, name, conn, location, schema='default',\n index=False, index_label=None, chunksize=None,\n if_exists='fail', compression=None, flavor='spark',\n type_mappings=to_sql_type_mappings):\n # TODO Supports orc, avro, json, csv or tsv format\n # TODO Supports partitioning\n if if_exists not in ('fail', 'replace', 'append'):\n raise ValueError('`{0}` is not valid for if_exists'.format(if_exists))\n if compression is not None and not AthenaCompression.is_valid(compression):\n raise ValueError('`{0}` is not valid for compression'.format(compression))\n\n import pyarrow as pa\n import pyarrow.parquet as pq\n bucket_name, key_prefix = parse_output_location(location)\n bucket = conn.session.resource('s3', region_name=conn.region_name,\n **conn._client_kwargs).Bucket(bucket_name)\n cursor = conn.cursor()\n retry_config = conn.retry_config\n\n table = cursor.execute(\"\"\"\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema = '{schema}'\n AND table_name = '{table}'\n \"\"\".format(schema=schema, table=name)).fetchall()\n if if_exists == 'fail':\n if table:\n raise 
OperationalError('Table `{0}.{1}` already exists.'.format(schema, name))\n elif if_exists == 'replace':\n if table:\n cursor.execute(\"\"\"\n DROP TABLE {schema}.{table}\n \"\"\".format(schema=schema, table=name))\n objects = bucket.objects.filter(Prefix=key_prefix)\n if list(objects.limit(1)):\n objects.delete()\n\n if index:\n reset_index(df, index_label)\n for chunk in get_chunks(df, chunksize):\n table = pa.Table.from_pandas(chunk)\n buf = pa.BufferOutputStream()\n pq.write_table(table, buf,\n compression=compression,\n flavor=flavor)\n retry_api_call(bucket.put_object,\n config=retry_config,\n Body=buf.getvalue().to_pybytes(),\n Key=key_prefix + str(uuid.uuid4()))\n\n ddl = generate_ddl(df=df,\n name=name,\n location=location,\n schema=schema,\n compression=compression,\n type_mappings=type_mappings)\n cursor.execute(ddl)\n\n\ndef get_column_names_and_types(df, type_mappings):\n return [\n (str(df.columns[i]), type_mappings(df.iloc[:, i]))\n for i in xrange(len(df.columns))\n ]\n\n\ndef generate_ddl(df, name, location, schema='default', compression=None,\n type_mappings=to_sql_type_mappings):\n ddl = 'CREATE EXTERNAL TABLE IF NOT EXISTS `{0}`.`{1}` (\\n'.format(schema, name)\n ddl += ',\\n'.join([\n '`{0}` {1}'.format(c[0], c[1])\n for c in get_column_names_and_types(df, type_mappings)\n ])\n ddl += '\\n)\\n'\n ddl += 'STORED AS PARQUET\\n'\n ddl += \"LOCATION '{0}'\\n\".format(location)\n if compression:\n ddl += \"TBLPROPERTIES ('parquet.compress'='{0}')\\n\".format(compression.upper())\n return ddl\n\n\ndef synchronized(wrapped):\n \"\"\"The missing @synchronized decorator\n\n https://git.io/vydTA\"\"\"\n _lock = threading.RLock()\n\n @functools.wraps(wrapped)\n def _wrapper(*args, **kwargs):\n with _lock:\n return wrapped(*args, **kwargs)\n return _wrapper\n\n\nclass RetryConfig(object):\n\n def __init__(self, exceptions=('ThrottlingException', 'TooManyRequestsException'),\n attempt=5, multiplier=1, max_delay=100, exponential_base=2):\n self.exceptions = exceptions\n self.attempt = attempt\n self.multiplier = multiplier\n self.max_delay = max_delay\n self.exponential_base = exponential_base\n\n\ndef retry_api_call(func, config, logger=None,\n *args, **kwargs):\n retry = tenacity.Retrying(\n retry=retry_if_exception(\n lambda e: getattr(e, 'response', {}).get(\n 'Error', {}).get('Code', None) in config.exceptions\n if e else False),\n stop=stop_after_attempt(config.attempt),\n wait=wait_exponential(multiplier=config.multiplier,\n max=config.max_delay,\n exp_base=config.exponential_base),\n after=after_log(logger, logger.level) if logger else None,\n reraise=True\n )\n return retry(func, *args, **kwargs)\n"
] | [
[
"pandas._lib.infer_dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
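
Note: the PATTERN_OUTPUT_LOCATION regex in pyathena/util.py above splits an S3 URI into bucket and key via named groups. A quick self-contained check of its behavior (the bucket name and key are made-up examples):

import re

# Same pattern as PATTERN_OUTPUT_LOCATION in pyathena/util.py above.
pattern = re.compile(r'^s3://(?P<bucket>[a-zA-Z0-9.\-_]+)/(?P<key>.+)$')
match = pattern.search('s3://my-bucket/staging/athena/')
assert match is not None
assert match.group('bucket') == 'my-bucket'
assert match.group('key') == 'staging/athena/'
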
1170300521/StyleGAN-nada | [
"1b6dc2d7dcbc37dd2e29af2f8b59d7635e6a26ec",
"1b6dc2d7dcbc37dd2e29af2f8b59d7635e6a26ec"
] | [
"ZSSGAN/utils/svm.py",
"ZSSGAN/test.py"
] | [
"import numpy as np\nfrom sklearn import svm\n\n\ndef train_boundary(pos_codes, neg_codes, split_ratio=0.7):\n pos_ids = np.arange(len(pos_codes))\n np.random.shuffle(pos_ids)\n train_pos_num = int(len(pos_ids) * split_ratio)\n train_pos_codes = pos_codes[pos_ids[:train_pos_num]]\n val_pos_codes = pos_codes[pos_ids[train_pos_num:]]\n\n neg_ids = np.arange(len(neg_codes))\n np.random.shuffle(neg_ids)\n train_neg_num = int(len(neg_ids) * split_ratio)\n train_neg_codes = neg_codes[neg_ids[:train_neg_num]]\n val_neg_codes = neg_codes[neg_ids[train_neg_num:]]\n\n train_data = np.concatenate([train_pos_codes, train_neg_codes], axis=0)\n train_label = np.concatenate([np.ones(train_pos_num, dtype=np.int),\n np.zeros(train_neg_num, dtype=np.int)], axis=0)\n print(f'Training: {train_pos_num} positive, {train_neg_num} negtive.')\n\n val_data = np.concatenate([val_pos_codes, val_neg_codes], axis=0)\n val_label = np.concatenate([np.ones(len(val_pos_codes)),\n np.zeros(len(val_neg_codes))], axis=0)\n print(f'Validation: {len(val_pos_codes)} positive, {len(val_neg_codes)} negtive.')\n\n clf = svm.SVC(kernel='linear')\n classifier = clf.fit(train_data, train_label)\n\n if len(val_label) > 0:\n val_pred = classifier.predict(val_data)\n correct_num = np.sum(val_label == val_pred)\n print(f'Accurracy for validattion set: {correct_num} / {len(val_label)} = {correct_num / len(val_label):.6f}.')\n \n a = classifier.coef_.reshape(1, pos_codes.shape[1]).astype(np.float32)\n\n # Specific for initialization of dynamic svm\n if split_ratio == 1:\n return np.concatenate([a, [classifier.intercept_.astype(np.float)]], axis=-1)\n return a / np.linalg.norm(a)\n\ndef get_delta_w(pos_path, output_path, delta_w_type='svm', args=None,\\\n neg_path=\"/home/ybyb/CODE/StyleGAN-nada/results/invert/A_gen_w.npy\"):\n pos_codes = np.load(pos_path).reshape((-1, 18, 512))[:, 0:(18-args.num_mask_last)]\n neg_codes = np.load(neg_path).reshape((-1, 18, 512))[:, 0:(18-args.num_mask_last)]\n chosen_num = min(500, len(neg_codes))\n pos_num = min(10000, len(pos_codes))\n # np.save(\"/home/ybyb/CODE/StyleGAN-nada/results/demo_ffhq/photo+Image_1/test/mean_delta_w.npy\", (pos_codes.mean(0) - neg_codes.mean(0)))\n np.random.shuffle(pos_codes)\n np.random.shuffle(neg_codes)\n pos_codes = pos_codes[0:pos_num].reshape((pos_num, -1))\n neg_codes = neg_codes[0:chosen_num].reshape((chosen_num, -1))\n if delta_w_type == 'svm':\n a = train_boundary(pos_codes, neg_codes, split_ratio=0.7)\n elif delta_w_type == 'mean':\n a = pos_codes.mean(0) - neg_codes.mean(0)\n a = a / np.linalg.norm(a)\n else:\n raise RuntimeError(f\"No type namely {delta_w_type}!\")\n tmp = np.zeros((18, 512))\n tmp[0:(18-args.num_mask_last)] = a.reshape((-1, 512))\n np.save(output_path, tmp)\n\nif __name__ == \"__main__\":\n pos_path = \"/home/ybyb/CODE/StyleGAN-nada/results/demo_ffhq/photo+Image_1/test/B_codes.npy\"\n # neg_path = \"/home/ybyb/CODE/StyleGAN-nada/results/invert/ffhq_w+.npy\"\n neg_path = \"/home/ybyb/CODE/StyleGAN-nada/results/demo_ffhq/photo+Image_1/test/A_codes.npy\"\n output_path = \"/home/ybyb/CODE/StyleGAN-nada/results/demo_ffhq/photo+Image_1/test/small_delta_w.npy\"\n get_delta_w(pos_path, neg_path, output_path)",
"import os\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nimport dlib\nfrom pathlib import Path\nfrom PIL import Image\nfrom argparse import Namespace\n\nfrom restyle.utils.common import tensor2im\nfrom restyle.models.psp import pSp\nfrom restyle.models.e4e import e4e\nfrom restyle.utils.inference_utils import run_on_batch\nfrom restyle.scripts.align_faces_parallel import align_face\nfrom utils.file_utils import save_images\nfrom options.train_options import TrainOptions\nfrom model.ZSSGAN import ZSSGAN\n\n\nargs = TrainOptions().parse()\npretrained_model_dir = '../weights'\nencoder_type = 'e4e' #@param['psp', 'e4e']\nda_dir = os.path.join(args.output_dir, 'checkpoint')\n\nrestyle_experiment_args = {\n \"model_path\": os.path.join(pretrained_model_dir, f\"restyle_{encoder_type}_ffhq_encode.pt\"),\n \"transform\": transforms.Compose([\n transforms.Resize((256, 256)),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])\n}\n\nmodel_path = restyle_experiment_args['model_path']\nckpt = torch.load(model_path, map_location='cpu')\n\nopts = ckpt['opts']\n\nopts['checkpoint_path'] = model_path\nopts = Namespace(**opts)\n\nrestyle_net = (pSp if encoder_type == 'psp' else e4e)(opts)\n\nrestyle_net.eval()\nrestyle_net.cuda()\nprint('Model successfully loaded!')\n\ndef run_alignment(image_path):\n if not os.path.exists(\"shape_predictor_68_face_landmarks.dat\"):\n print('Downloading files for aligning face image...')\n os.system('wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2')\n os.system('bzip2 -dk shape_predictor_68_face_landmarks.dat.bz2')\n print('Done.')\n predictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n aligned_image = align_face(filepath=image_path, predictor=predictor) \n print(\"Aligned image has shape: {}\".format(aligned_image.size))\n return aligned_image \n\n\ndef get_avg_image(net):\n avg_image = net(net.latent_avg.unsqueeze(0),\n input_code=True,\n randomize_noise=False,\n return_latents=False,\n average_code=True)[0]\n avg_image = avg_image.to('cuda').float().detach()\n return avg_image\n\n\ndef get_transfer_image(image_path, net):\n\n input_image = run_alignment(image_path)\n\n img_transforms = restyle_experiment_args['transform']\n transformed_image = img_transforms(input_image)\n\n opts.n_iters_per_batch = 5\n opts.resize_outputs = False # generate outputs at full resolution\n\n\n with torch.no_grad():\n avg_image = get_avg_image(restyle_net)\n result_batch, result_latents = run_on_batch(transformed_image.unsqueeze(0).cuda(), restyle_net, opts, avg_image)\n\n #@title Convert inverted image.\n inverted_latent = torch.Tensor(result_latents[0][4]).cuda().unsqueeze(0).unsqueeze(1)\n\n with torch.no_grad():\n net.eval()\n \n [sampled_src, sampled_dst] = net(inverted_latent, input_is_latent=True)[0]\n \n # joined_img = torch.cat([sampled_src, sampled_dst], dim=0)\n save_images(sampled_dst, args.output_dir, Path(image_path).stem, 2, 0)\n # display(Image.open(os.path.join(sample_dir, f\"joined_{str(0).zfill(6)}.jpg\")).resize((512, 256)))\n\nif __name__ == \"__main__\":\n # Set up networks, optimizers.\n print(\"Initializing networks...\")\n net = ZSSGAN(args)\n image_path = \"../img/8.jpg\"\n get_transfer_image(image_path, net)\n"
] | [
[
"numpy.linalg.norm",
"numpy.save",
"numpy.random.shuffle",
"numpy.concatenate",
"numpy.ones",
"sklearn.svm.SVC",
"numpy.load",
"numpy.zeros",
"numpy.sum"
],
[
"torch.no_grad",
"torch.Tensor",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
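
Note: train_boundary in ZSSGAN/utils/svm.py above recovers a latent-edit direction as the unit normal of a linear SVM separating positive from negative latent codes (the coef_ of a linear-kernel SVC). A toy sketch of the same idea on synthetic 2-D data; all values here are made up for illustration:

import numpy as np
from sklearn import svm

rng = np.random.default_rng(0)
pos = rng.normal(loc=+2.0, size=(100, 2))   # stand-ins for positive codes
neg = rng.normal(loc=-2.0, size=(100, 2))   # stand-ins for negative codes
data = np.concatenate([pos, neg], axis=0)
labels = np.concatenate([np.ones(100), np.zeros(100)])

clf = svm.SVC(kernel='linear').fit(data, labels)
direction = clf.coef_.reshape(1, -1).astype(np.float32)
direction /= np.linalg.norm(direction)      # unit normal = edit direction
print(direction)
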
Giuseppe5/pytorch-ocr | [
"f8e89295e911c7a3eec6e3aa13335c031cd3adfe"
] | [
"main.py"
] | [
"# Copyright (c) 2018, Xilinx, Inc.\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without \n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, \n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright \n# notice, this list of conditions and the following disclaimer in the \n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its \n# contributors may be used to endorse or promote products derived from \n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, \n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR \n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR \n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, \n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, \n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR \n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF \n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nimport argparse\nimport json\nimport os\n\nimport torch\nimport numpy as np\nfrom ocr import PytorchOCRTrainer\n\ntorch.backends.cudnn.enabled = False\ntorch.set_printoptions(precision=10)\n\nclass objdict(dict):\n def __getattr__(self, name):\n if name in self:\n return self[name]\n else:\n raise AttributeError(\"No such attribute: \" + name)\n\n def __setattr__(self, name, value):\n self[name] = value\n\n def __delattr__(self, name):\n if name in self:\n del self[name]\n else:\n raise AttributeError(\"No such attribute: \" + name)\n\ndef ascii_encode_dict(data):\n ascii_encode = lambda x: x.encode('ascii')\n return dict(map(ascii_encode, pair) if isinstance(pair[1], unicode) else pair for pair in data.items())\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='OCR training')\n parser.add_argument('--params', '-p', default=\"default_trainer_params.json\", help='Path to params JSON file. Default ignored when resuming.')\n parser.add_argument('--experiments', '-e', default=\"experiments\", help='Path for experiments. 
Ignored when resuming.')\n parser.add_argument('--input', '-i', help='Path to input checkpoint.')\n parser.add_argument('--pretrained_policy', default=\"RESUME\", help='RESUME/RETRAIN.')\n parser.add_argument('--init_bn_fc_fusion', default=False, action='store_true', help='Init BN FC fusion.')\n parser.add_argument('--eval', default=False, action='store_true', help='Perform only evaluation on val dataset.')\n parser.add_argument('--export', default=False, action='store_true', help='Perform only export of quantized weights.')\n parser.add_argument('--no_cuda', default=False, action='store_true', help='Run on CPU.')\n parser.add_argument('--export_test_image', default=False, action='store_true', help='Export pre-quantized and reshaped test image.')\n parser.add_argument('--valid', default=\"db_files_uw3-500/valid.txt\", help='Input path for val file.')\n parser.add_argument('--sortedtrain', default=\"db_files_uw3-500/sortedTrain.txt\", help='Input path for train file.')\n parser.add_argument('--imgs', default=\"db_files_uw3-500/imgs\", help='Input path for images dir.')\n parser.add_argument('--dry_run', default=False, action='store_true', help='Do not write any output file.')\n parser.add_argument('--simd_factor', default=1, type=int, help='SIMD factor for export.')\n parser.add_argument('--pe', default=1, type=int, help='Number of PEs for export.')\n\n #Overrides\n parser.add_argument('--random_seed', type=int)\n parser.add_argument('--batch_size', type=int)\n parser.add_argument('--num_workers', type=int)\n parser.add_argument('--layer_size', type=int)\n parser.add_argument('--neuron_type', type=str)\n parser.add_argument('--target_height', type=int)\n parser.add_argument('--epochs', type=int)\n parser.add_argument('--lr', type=float)\n parser.add_argument('--lr_schedule', type=str)\n parser.add_argument('--lr_step', type=int)\n parser.add_argument('--lr_gamma', type=float)\n parser.add_argument('--max_norm', type=float)\n parser.add_argument('--seq_to_random_threshold', type=int)\n parser.add_argument('--bidirectional', type=bool)\n parser.add_argument('--reduce_bidirectional', type=str)\n parser.add_argument('--recurrent_bias_enabled', type=bool)\n parser.add_argument('--checkpoint_interval', type=int)\n parser.add_argument('--recurrent_weight_bit_width', type=int)\n parser.add_argument('--recurrent_weight_quantization', type=str)\n parser.add_argument('--recurrent_bias_bit_width', type=int)\n parser.add_argument('--recurrent_bias_quantization', type=str)\n parser.add_argument('--recurrent_activation_bit_width', type=int)\n parser.add_argument('--recurrent_activation_quantization', type=str)\n parser.add_argument('--internal_activation_bit_width', type=int)\n parser.add_argument('--fc_weight_bit_width', type=int)\n parser.add_argument('--fc_weight_quantization', type=str)\n parser.add_argument('--fc_bias_bit_width', type=int)\n parser.add_argument('--fc_bias_quantization', type=str)\n parser.add_argument('--quantize_input', type=bool)\n parser.add_argument('--mask_padded', type=bool)\n\n args = parser.parse_args()\n\n #Set paths relative to main.py\n path_args = ['params', 'experiments', 'input', 'valid', 'sortedtrain', 'imgs']\n for path_arg in path_args:\n path = getattr(args, path_arg)\n if path is not None and not os.path.isabs(path):\n abs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), path))\n setattr(args, path_arg, abs_path)\n\n #Avoid creating new folders etc. 
\n if args.eval or args.export or args.export_test_image:\n args.dry_run = True\n\n #force cpu when exporting weights\n if args.export or args.export_test_image:\n args.no_cuda = True\n\n if args.input and args.pretrained_policy == \"RESUME\" and args.params == \"default_trainer_params.json\":\n package = torch.load(args.input, map_location=lambda storage, loc: storage)\n trainer_params = package['trainer_params']\n else:\n with open(args.params) as d:\n trainer_params = json.load(d, object_hook=ascii_encode_dict)\n trainer_params = objdict(trainer_params)\n\n #Overrides\n if args.epochs is not None:\n trainer_params.epochs = args.epochs\n if args.internal_activation_bit_width is not None:\n trainer_params.internal_activation_bit_width = args.internal_activation_bit_width\n\n trainer = PytorchOCRTrainer(trainer_params, args)\n\n if args.export_test_image:\n trainer.export_test_image(trainer_params.target_height)\n exit(0)\n\n if args.export:\n trainer.export_model(args.simd_factor, args.pe)\n exit(0)\n\n if args.eval:\n trainer.eval_model()\n else:\n trainer.train_model()\n\n\n\n\n\n\n"
] | [
[
"torch.set_printoptions",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alsmeirelles/ResRep | [
"abc8d221cfa153de577ca1bbba515cc7abb94378"
] | [
"display_hdf5.py"
] | [
"from utils.misc import read_hdf5\nfrom utils.misc import extract_deps_from_weights_file\nimport sys\nimport numpy as np\n\nwf = sys.argv[1]\ndeps = extract_deps_from_weights_file(wf)\ndi = read_hdf5(wf)\nnum_kernel_params = 0\n\nconv_kernel_cnt = 0\nmatrix_param_cnt = 0\nvec_param_cnt = 0\n\nbias_cnt = 0\nbeta_cnt = 0\ngamma_cnt = 0\nmu_cnt = 0\nvar_cnt = 0\n\nfor name, array in di.items():\n if array.ndim in [2, 4]:\n num_kernel_params += array.size\n\n if 'base_mask' in name:\n print(name, array)\n\n print(name, array.shape, np.mean(array), np.std(array),\n ' positive {}, negative {}, zeros {}, near-zero {}'.format(np.sum(array > 0), np.sum(array < 0), np.sum(array == 0),\n np.sum(np.abs(array) <= 1e-5)))\n\n if array.ndim == 2:\n matrix_param_cnt += array.size\n elif array.ndim == 1:\n vec_param_cnt += array.size\n elif array.ndim == 4:\n conv_kernel_cnt += array.size\n if 'running_mean' in name or 'moving_mean' in name:\n mu_cnt += array.size\n elif 'running_var' in name or 'moving_var' in name:\n var_cnt += array.size\n elif ('weight' in name and 'bn' in name.lower()) or 'gamma' in name:\n gamma_cnt += array.size\n elif ('bias' in name and 'bn' in name.lower()) or 'beta' in name:\n beta_cnt += array.size\n elif 'bias' in name:\n bias_cnt += array.size\n elif 'spatial_mask' in name:\n print(array)\n print(np.sum(array))\n\nprint('number of kernel params: ', num_kernel_params)\nprint('vec {}, matrix {}, conv {}, total {}'.format(vec_param_cnt, matrix_param_cnt, conv_kernel_cnt,\n vec_param_cnt + matrix_param_cnt + conv_kernel_cnt))\nprint('mu {}, var {}, gamma {}, beta {}, bias {}'.format(mu_cnt, var_cnt, gamma_cnt, beta_cnt, bias_cnt))\n\nprint('Model deps: {}'.format(deps))\n"
] | [
[
"numpy.abs",
"numpy.std",
"numpy.mean",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
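
Note: display_hdf5.py above classifies weight arrays by dimensionality (4-D conv kernels, 2-D matrices, 1-D vectors) and reports per-array statistics. A small stand-in demo of that counting logic on a hand-built dict (names and shapes are hypothetical, mimicking what read_hdf5 returns):

import numpy as np

# Toy stand-in for the dict returned by read_hdf5 in display_hdf5.py above.
weights = {
    'conv1.weight': np.random.randn(16, 3, 3, 3),   # 4-D: conv kernel
    'fc.weight': np.random.randn(10, 64),           # 2-D: matrix
    'bn1.running_mean': np.zeros(16),               # 1-D: vector
}
kernel_params = sum(a.size for a in weights.values() if a.ndim in (2, 4))
assert kernel_params == 16 * 3 * 3 * 3 + 10 * 64
for name, a in weights.items():
    print(name, a.shape, float(np.mean(a)), float(np.std(a)),
          'near-zero', int(np.sum(np.abs(a) <= 1e-5)))
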
xsppp/gpipe_with_Mnist | [
"5cd8aff375e7f8fc3c6fb065ce3f40854eb6f31a",
"4486e675c7b52c7519a6d39f97e9b22ed5461944"
] | [
"lingvo/tasks/car/input_preprocessors.py",
"lingvo/core/layers.py"
] | [
"# Lint as: python3\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Input preprocessors.\"\"\"\n\nfrom lingvo import compat as tf\nfrom lingvo.core import base_layer\nfrom lingvo.core import py_utils\nfrom lingvo.core import schedule\nfrom lingvo.tasks.car import car_lib\nfrom lingvo.tasks.car import detection_3d_lib\nfrom lingvo.tasks.car import geometry\nfrom lingvo.tasks.car import ops\nimport numpy as np\n# pylint:disable=g-direct-tensorflow-import\nfrom tensorflow.python.ops import inplace_ops\n# pylint:enable=g-direct-tensorflow-import\n\n\ndef _ConsistentShuffle(tensors, seed):\n \"\"\"Shuffle multiple tensors with the same shuffle order.\"\"\"\n shuffled_idx = tf.range(tf.shape(tensors[0])[0])\n shuffled_idx = tf.random.shuffle(shuffled_idx, seed=seed)\n return tuple([tf.gather(t, shuffled_idx) for t in tensors])\n\n\ndef _GetApplyPointMaskFn(points_mask):\n \"\"\"Returns a function that applies a mask to one of our points tensors.\"\"\"\n\n def _ApplyPointMaskFn(points_tensor):\n \"\"\"Applies a mask to the points tensor.\"\"\"\n if points_tensor is None:\n return points_tensor\n return tf.boolean_mask(points_tensor, points_mask)\n\n return _ApplyPointMaskFn\n\n\ndef _Dense(sparse):\n return tf.sparse_to_dense(\n sparse_indices=sparse.indices,\n output_shape=sparse.dense_shape,\n sparse_values=sparse.values,\n default_value=0)\n\n\nclass Preprocessor(base_layer.BaseLayer):\n \"\"\"Base class for input preprocessor.\n\n Input preprocessors expect the combined output of all extractors and performs\n a transformation on them. 
Input preprocessors can add/edit/remove fields\n from the NestedMap of features.\n\n Note: Features correspond to that for one example (no batch dimension).\n\n Sub-classes need to implement the following three functions:\n\n 1) TransformFeatures(features): Given a NestedMap of features representing the\n output of all the extractors, apply a transformation on the features.\n\n 2) TransformShapes(shapes): Given a corresponding NestedMap of shapes,\n produce a NestedMap of shapes that corresponds to the transformation of the\n features after TransformFeatures.\n\n 3) TransformDTypes(dtypes): Given a corresponding NestedMap of dtypes,\n produce a NestedMap of dtypes that corresponds to the transformation of the\n features after TransformFeatures.\n\n The preprocessor is expected to explicitly pass through untouched fields.\n For example, a preprocessor that does data augmentation should modify the\n features NestedMap on the fields it cares about augmenting, and then return\n the features NestedMap.\n \"\"\"\n\n @classmethod\n def Params(cls):\n \"\"\"Default params.\"\"\"\n p = super().Params()\n p.name = cls.__name__\n return p\n\n def FProp(self, theta, features):\n \"\"\"Performs TransformFeatures.\"\"\"\n del theta # unused\n return self.TransformFeatures(features)\n\n def TransformFeatures(self, features):\n \"\"\"Transforms the features for one example.\n\n Args:\n features: A `NestedMap` of tensors.\n\n Returns:\n A `NestedMap` of tensors corresponding.\n \"\"\"\n raise NotImplementedError()\n\n def TransformShapes(self, shapes):\n \"\"\"Sets correct shapes corresponding to TransformFeatures.\n\n Args:\n shapes: A `NestedMap` of TensorShapes, corresponding to the\n pre-transformed features.\n\n Returns:\n A `NestedMap` of TensorShapes corresponding to the transformed features.\n \"\"\"\n raise NotImplementedError()\n\n def TransformDTypes(self, dtypes):\n \"\"\"Sets correct dtypes corresponding to TransformFeatures.\n\n Args:\n dtypes: A `NestedMap` of DTypes, corresponding to the pre-transformed\n features.\n\n Returns:\n A `NestedMap` of DTypes corresponding to the transformed features.\n \"\"\"\n raise NotImplementedError()\n\n\nclass EntryPreprocessor(Preprocessor):\n \"\"\"A Preprocessor that transforms a NestedMap sub-structure.\n\n Some preprocessors want to apply a function to any NestedMap whose key matches\n a specific prefix. An EntryPreprocessor provides an interface for specifying\n the function transformation for a NestedMap of inputs, adding, modifying, or\n deleting the entries in that NestedMap.\n\n For example, if an input contains a nested structure such as:\n - lasers.front.xyz\n .features\n - lasers.side.xyz\n .features\n\n and one wants to apply a transform that modifies the .xyz features\n on both structures, one can define an EntryPreprocessor that implements:\n\n UpdateEntry(entry):\n UpdateEntryShape(shapes):\n UpdateEntryDType(dtypes):\n\n and set self.params.prefixes = ['lasers.front', 'lasers.side']\n where the prefixes refer to a fully-qualified NestedMap sub-structure.\n\n The arguments to these functions will contain just the NestedMap structure\n whose key prefix can be found in self.params.prefixes. 
One can then modify\n these structures as desired.\n\n Example:\n def UpdateEntry(self, entry):\n # entry is a NestedMap.\n assert 'xyz' in entry\n entry.xyz = self._ApplyFn(entry.xyz)\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('prefixes', ['pseudo_ri'], 'List of keys to apply to.')\n return p\n\n def _ApplyToMatchingStructure(self, nested_map, fn):\n \"\"\"Apply fn to any NestedMap sub-structure whose prefix is in p.prefixes.\"\"\"\n p = self.params\n # Don't mutate the original.\n nested_map = nested_map.DeepCopy()\n updated_entries = []\n for prefix in p.prefixes:\n entry = nested_map.GetItem(prefix)\n if not isinstance(entry, py_utils.NestedMap):\n raise TypeError('Prefix key {} selected a {}, not a NestedMap!'.format(\n prefix, type(entry)))\n fn(entry)\n updated_entries.append(entry)\n return nested_map, updated_entries\n\n def UpdateEntry(self, entry):\n \"\"\"Update the Tensors in a NestedMap entry.\n\n Args:\n entry: A NestedMap of Tensors.\n \"\"\"\n raise NotImplementedError()\n\n def UpdateEntryShape(self, shapes):\n \"\"\"Update the shapes in a NestedMap entry.\n\n Args:\n shapes: A NestedMap of TensorShapes.\n \"\"\"\n raise NotImplementedError()\n\n def UpdateEntryDType(self, dtypes):\n \"\"\"Transform the dtypes in a NestedMap entry.\n\n Args:\n dtypes: A NestedMap of dtypes.\n \"\"\"\n raise NotImplementedError()\n\n def TransformFeatures(self, features):\n features, _ = self._ApplyToMatchingStructure(features, self.UpdateEntry)\n return features\n\n def TransformShapes(self, shapes):\n shapes, _ = self._ApplyToMatchingStructure(shapes, self.UpdateEntryShape)\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes, _ = self._ApplyToMatchingStructure(dtypes, self.UpdateEntryDType)\n return dtypes\n\n\nclass CreateDecoderCopy(Preprocessor):\n \"\"\"Creates references to current lasers, images, and labels.\n\n This is useful if the data is further transformed.\n\n If desired, the keys that are copied can be customized by overriding the\n default keys param.\n\n This preprocessor expects features to optionally contain the following keys:\n - lasers - a NestedMap of tensors\n - images - a NestedMap of tensors\n - labels - a NestedMap of tensors\n\n Adds the following features (if the features existed):\n - decoder_copy.lasers - a copy of the lasers NestedMap\n - decoder_copy.images - a copy of the images NestedMap\n - decoder_copy.labels - a copy of the labels NestedMap\n\n The processor also by default pads the laser features; this can be disabled\n by setting the pad_lasers param to None.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('keys', ['lasers', 'labels', 'images'],\n 'Keys to look for and copy if exists.')\n p.Define('parent_key', 'decoder_copy', 'The key to nest the copies under.')\n p.Define('pad_lasers', PadLaserFeatures.Params(),\n 'Params for a layer that pads the laser features.')\n p.name = 'create_decoder_copy'\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if p.pad_lasers is not None:\n self.CreateChild('pad_lasers', p.pad_lasers)\n\n def _DeepCopyIfExists(self, keys, nested_map, parent_key):\n \"\"\"Deep copy a specific key to a parent key if it exists.\"\"\"\n for key in keys:\n if key in nested_map:\n if parent_key not in nested_map:\n nested_map[parent_key] = py_utils.NestedMap()\n nested_map[parent_key][key] = nested_map[key].DeepCopy()\n return nested_map\n\n def TransformFeatures(self, features):\n p = self.params\n features = 
self._DeepCopyIfExists(p.keys, features, p.parent_key)\n if p.pad_lasers is not None:\n features[p.parent_key] = self.pad_lasers.TransformFeatures(\n features[p.parent_key])\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n shapes = self._DeepCopyIfExists(p.keys, shapes, p.parent_key)\n if p.pad_lasers is not None:\n shapes[p.parent_key] = self.pad_lasers.TransformShapes(\n shapes[p.parent_key])\n return shapes\n\n def TransformDTypes(self, dtypes):\n p = self.params\n dtypes = self._DeepCopyIfExists(p.keys, dtypes, p.parent_key)\n if p.pad_lasers is not None:\n dtypes[p.parent_key] = self.pad_lasers.TransformDTypes(\n dtypes[p.parent_key])\n return dtypes\n\n\nclass FilterByKey(Preprocessor):\n \"\"\"Filters features to keep only specified keys.\n\n This keeps only feature entries that are specified. This allows us to reduce\n the number of fields returned. For example, during training, one may not\n need the actual laser points if training with a pillars based model that\n has a preprocessor that already maps the points to grid.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'keep_key_prefixes', [''], 'Prefixes of keys to keep. If this '\n 'contains the empty string, then it will keep all the keys.')\n return p\n\n def _FilterFn(self, key, entry):\n \"\"\"Filter a nested map.\"\"\"\n del entry # unused\n p = self.params\n for prefix in p.keep_key_prefixes:\n if key.startswith(prefix):\n return True\n return False\n\n def TransformFeatures(self, features):\n return features.FilterKeyVal(self._FilterFn)\n\n def TransformShapes(self, shapes):\n return shapes.FilterKeyVal(self._FilterFn)\n\n def TransformDTypes(self, dtypes):\n return dtypes.FilterKeyVal(self._FilterFn)\n\n\nclass FilterGroundTruthByNumPoints(Preprocessor):\n \"\"\"Removes ground truth boxes with less than params.min_num_points points.\n\n This preprocessor expects features to contain the following keys::\n labels.labels of shape [..., L]\n labels.bboxes_3d of shape [..., L, 7]\n labels.bboxes_3d_mask of shape [..., L]\n labels.unfiltered_bboxes_3d_mask of shape [..., L]\n labels.bboxes_3d_num_points of shape [..., L].\n\n Modifies the bounding box data to turn off ground truth objects that don't\n meet the params.min_num_points point filter:\n\n labels.labels: Boxes with less than params.min_num_points have their label\n set to params.background_id (defaults to 0).\n\n labels.bboxes_3d_mask: Boxes with less than params.min_num_points are set\n to 0.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'min_num_points', 1, 'The minimum number of points allowed before '\n 'the associated ground truth box is turned off. Defaults to 1.')\n p.Define(\n 'background_id', 0, 'The ID of the background class we set '\n 'filtered boxes to. 
Defaults to 0.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n bbox_is_valid = tf.greater_equal(features.labels.bboxes_3d_num_points,\n p.min_num_points)\n features.labels.labels = tf.where(\n bbox_is_valid, features.labels.labels,\n p.background_id * tf.ones_like(features.labels.labels))\n features.labels.bboxes_3d_mask *= tf.cast(bbox_is_valid, tf.float32)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass FilterGroundTruthByDifficulty(Preprocessor):\n \"\"\"Removes groundtruth boxes based on detection difficulty.\n\n This preprocessor expects features to contain the following keys::\n labels.single_frame_detection_difficulties of shape [..., L]\n labels.labels of shape [..., L]\n labels.bboxes_3d_mask of shape [..., L]\n labels.unfiltered_bboxes_3d_mask of shape [..., L]\n\n The preprocessor masks out the bboxes_3d_mask / labels based on whether\n single_frame_detection_difficulties is greater than p.difficulty_threshold.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'background_id', 0, 'The ID of the background class we set '\n 'filtered boxes to. Defaults to 0.')\n p.Define(\n 'difficulty_threshold', 1,\n 'Filter groundtruth bounding boxes whose detection difficulty is '\n 'greater than `difficulty_threshold`')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n bbox_is_valid = tf.less_equal(\n features.labels.single_frame_detection_difficulties,\n p.difficulty_threshold)\n features.labels.labels = tf.where(\n bbox_is_valid, features.labels.labels,\n p.background_id * tf.ones_like(features.labels.labels))\n features.labels.bboxes_3d_mask *= tf.cast(bbox_is_valid, tf.float32)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass CountNumberOfPointsInBoxes3D(Preprocessor):\n \"\"\"Computes bboxes_3d_num_points.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - labels.bboxes_3d of shape [L, 7]\n - labels.bboxes_3d_mask of shape [L]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n Adds the following features:\n labels.bboxes_3d_num_points: [L] - integer tensor containing the number of\n laser points for each corresponding bbox.\n \"\"\"\n\n def TransformFeatures(self, features):\n points_xyz = features.lasers.points_xyz\n if 'points_padding' in features.lasers:\n points_mask = 1 - features.lasers.points_padding\n points_xyz = tf.boolean_mask(points_xyz, points_mask)\n\n points_in_bboxes_mask = geometry.IsWithinBBox3D(points_xyz,\n features.labels.bboxes_3d)\n bboxes_3d_num_points = tf.reduce_sum(\n tf.cast(points_in_bboxes_mask, tf.int32), axis=0, keepdims=False)\n bboxes_3d_num_points *= tf.cast(features.labels.bboxes_3d_mask, tf.int32)\n\n features.labels.bboxes_3d_num_points = bboxes_3d_num_points\n return features\n\n def TransformShapes(self, shapes):\n num_bboxes = shapes.labels.bboxes_3d[0]\n shapes.labels.bboxes_3d_num_points = tf.TensorShape([num_bboxes])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.labels.bboxes_3d_num_points = tf.int32\n return dtypes\n\n\nclass AddPerPointLabels(Preprocessor):\n \"\"\"Computes the class and bbox id of each point.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape 
[P, 3]\n - labels.bboxes_3d of shape [L, 7]\n - labels.labels of shape [L]\n\n This makes an assumption that each point is only in 1 box, which should\n almost always true in 3D. In cases where this is not true, the largest\n label integer and largest bbox_id will be assigned.\n\n NOTE: Be very careful that this is performed after any modifications\n to the semantic labels of each point in the pointcloud. Examples of this\n would be operators like GroundTruthAugmentation, or DropBoxesOutOfRange.\n\n Adds the following features:\n lasers.points_label: [P] - integer tensor containing the class id of each\n point.\n lasers.points_bbox_id: [P] - integer tensor containing box id of each\n point from 0 to num_bboxes, where an id of num_bboxes indicates a\n background point.\n lasers.points_bbox_3d: [P, 7] - float tensor containing bounding box of\n each point.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'per_dimension_adjustment', None,\n 'A list of len 3 of floats with the amount (in meters) to add to '\n 'each dimension of the box before using it to select points. '\n 'If enabled, this is designed to protect against overly tight box '\n 'annotations that appear in KITTI.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n points_xyz = features.lasers.points_xyz\n bboxes_3d = features.labels.bboxes_3d\n num_points, _ = py_utils.GetShape(points_xyz)\n num_bboxes, _ = py_utils.GetShape(bboxes_3d)\n\n if p.per_dimension_adjustment:\n if len(p.per_dimension_adjustment) != 3:\n raise ValueError(\n 'param `per_dimension_adjustment` expected to be len 3.')\n dims_adjustment = tf.constant([0, 0, 0] + p.per_dimension_adjustment +\n [0])\n bboxes_3d = bboxes_3d + dims_adjustment\n\n # Find which points are in each box and what class each box is.\n points_in_bboxes_mask = geometry.IsWithinBBox3D(points_xyz, bboxes_3d)\n points_in_bboxes_mask = tf.cast(points_in_bboxes_mask, tf.int32)\n points_in_bboxes_mask = py_utils.HasShape(points_in_bboxes_mask,\n [num_points, num_bboxes])\n\n # points_in_bboxes_mask is a [num_points, num_bboxes] 0/1 tensor\n # indicating whether that point is in a given box.\n # Each point should only be in one box, so after broadcasting the label\n # across the binary mask, we do a reduce_max to get the max label id\n # for each point. 
Since each point only belongs to one box, it will be\n # the only non-zero (background) label in that box.\n # Note: We assume background to be class_id == 0\n points_label = tf.reduce_max(\n points_in_bboxes_mask * features.labels.labels, axis=1)\n points_bbox_id = tf.argmax(\n points_in_bboxes_mask, axis=1, output_type=tf.int32)\n # If the class is background, make its id == num_bboxes\n points_bbox_id = tf.where(points_label > 0, points_bbox_id,\n tf.broadcast_to(num_bboxes, [num_points]))\n\n # For each point, get the bbox_3d data.\n dummy_bbox = tf.constant([[0, 0, 0, 0, 0, 0, 0]], dtype=tf.float32)\n bboxes_3d = tf.concat([bboxes_3d, dummy_bbox], axis=0)\n points_bbox_3d = tf.gather(bboxes_3d, points_bbox_id)\n\n points_label = tf.reshape(points_label, [num_points])\n points_bbox_id = tf.reshape(points_bbox_id, [num_points])\n features.lasers.points_label = points_label\n features.lasers.points_bbox_id = points_bbox_id\n features.lasers.points_bbox_3d = points_bbox_3d\n return features\n\n def TransformShapes(self, shapes):\n num_points = shapes.lasers.points_xyz[0]\n shapes.lasers.points_label = tf.TensorShape([num_points])\n shapes.lasers.points_bbox_id = tf.TensorShape([num_points])\n shapes.lasers.points_bbox_3d = tf.TensorShape([num_points, 7])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.lasers.points_label = tf.int32\n dtypes.lasers.points_bbox_id = tf.int32\n dtypes.lasers.points_bbox_3d = tf.float32\n return dtypes\n\n\nclass PointsToGrid(Preprocessor):\n \"\"\"Bins points to a 3D-grid using custom op: ops.point_to_grid.\n\n Expects features to have keys:\n - lasers.points_xyz of shape [P, 3]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n If normalizing the labels is enabled, then also expects:\n - labels.weights\n - labels.bboxes_td\n - labels.bboxes_td_mask\n - labels.bboxes_3d_mask\n\n Let:\n gx, gy, gz = p.grid_size\n F = 3 + num_laser_features\n\n Adds the following features:\n grid_centers: [gx, gy, gz, 3]: For each grid cell, the (x,y,z)\n floating point coordinate of its center.\n grid_num_points: [gx, gy, gz]: The number of points in each grid\n cell (integer).\n laser_grid: [gx, gy, gz, num_points_per_cell, F] - A 5D floating\n point Tensor containing the laser data placed into a fixed grid.\n\n Modifies the bboxes in labels to also be within the grid range x/y by default.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_points_per_cell', 100,\n 'The maximum number of points per cell.')\n p.Define('grid_size', (40, 40, 1), 'Grid size along x,y,z axis.')\n\n # The max range of x and y is [-80, 80].\n p.Define('grid_range_x', (-80, 80), 'The X-axis Range covered by the grid')\n p.Define('grid_range_y', (-80, 80), 'The Y-axis Range covered by the grid')\n p.Define('grid_range_z', (-2, 4), 'The Z-axis Range covered by the grid')\n\n p.Define('normalize_td_labels', True,\n 'Whether to clip the labels to the grid limits.')\n return p\n\n def _NormalizeLabels(self, ymin, xmin, ymax, xmax, x_range, y_range):\n \"\"\"Normalizes the bboxes within a given range.\"\"\"\n assert x_range, 'Must specify x_range if clipping.'\n assert y_range, 'Must specify y_range if clipping.'\n assert len(x_range) == 2, 'x_range %s must be 2 elements.' % x_range\n assert len(y_range) == 2, 'y_range %s must be 2 elements.' 
% y_range\n\n x_range_min = x_range[0]\n x_range_len = x_range[1] - x_range[0]\n y_range_min = y_range[0]\n y_range_len = y_range[1] - y_range[0]\n\n xmin = tf.cast(xmin - x_range_min, tf.float32) / tf.cast(\n x_range_len, tf.float32)\n xmax = tf.cast(xmax - x_range_min, tf.float32) / tf.cast(\n x_range_len, tf.float32)\n ymin = tf.cast(ymin - y_range_min, tf.float32) / tf.cast(\n y_range_len, tf.float32)\n ymax = tf.cast(ymax - y_range_min, tf.float32) / tf.cast(\n y_range_len, tf.float32)\n\n return ymin, xmin, ymax, xmax\n\n def TransformFeatures(self, features):\n p = self.params\n\n points_xyz = features.lasers.points_xyz\n points_feature = features.lasers.points_feature\n if ('points_padding' in features.lasers and\n features.lasers.points_padding is not None):\n points_mask = 1 - features.lasers.points_padding\n points_xyz = tf.boolean_mask(points_xyz, points_mask)\n points_feature = tf.boolean_mask(points_feature, points_mask)\n\n points_full = tf.concat([points_xyz, points_feature], axis=-1)\n points_grid_full, grid_centers, num_points = ops.point_to_grid(\n points_full, p.num_points_per_cell, p.grid_size[0], p.grid_size[1],\n p.grid_size[2], p.grid_range_x, p.grid_range_y, p.grid_range_z)\n\n features.laser_grid = points_grid_full\n features.grid_centers = grid_centers\n features.grid_num_points = num_points\n\n if p.normalize_td_labels:\n # Normalize bboxes_td w.r.t grid range.\n obb = features.labels\n x_range = p.grid_range_x\n y_range = p.grid_range_y\n ymin, xmin, ymax, xmax = tf.unstack(obb.bboxes_td[..., :4], axis=-1)\n ymin, xmin, ymax, xmax = self._NormalizeLabels(\n ymin, xmin, ymax, xmax, x_range=x_range, y_range=y_range)\n obb.bboxes_td = tf.concat(\n [tf.stack([ymin, xmin, ymax, xmax], axis=-1), obb.bboxes_td[..., 4:]],\n axis=-1)\n\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n shapes.grid_centers = tf.TensorShape(list(p.grid_size) + [3])\n shapes.grid_num_points = tf.TensorShape(list(p.grid_size))\n shapes.laser_grid = tf.TensorShape(\n list(p.grid_size) +\n [p.num_points_per_cell, 3 + shapes.lasers.points_feature[-1]])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.grid_centers = tf.float32\n dtypes.grid_num_points = tf.int32\n dtypes.laser_grid = tf.float32\n return dtypes\n\n\nclass _PointPillarGridSettings:\n \"\"\"Settings for PointPillars model defined in paper.\n\n https://arxiv.org/abs/1812.05784\n \"\"\"\n # Chooses grid sizes that are a multiple of 16 to support point pillars\n # model requirements. 
These also happen to match the values\n # in the PointPillars paper (voxel width of 0.16m in x, y)\n GRID_X = 432\n GRID_Y = 496\n GRID_Z = 1\n\n # These fields are set in the subclasses.\n GRID_X_RANGE = None\n GRID_Y_RANGE = None\n GRID_Z_RANGE = None\n\n @classmethod\n def UpdateGridParams(cls, grid_params):\n \"\"\"Apply PointPillars settings to grid_params.\"\"\"\n grid_params.grid_size = (cls.GRID_X, cls.GRID_Y, cls.GRID_Z)\n grid_params.grid_range_x = cls.GRID_X_RANGE\n grid_params.grid_range_y = cls.GRID_Y_RANGE\n grid_params.grid_range_z = cls.GRID_Z_RANGE\n\n @classmethod\n def UpdateAnchorGridParams(cls, anchor_params, output_stride=2):\n \"\"\"Apply PointPillars settings to anchor_params.\"\"\"\n # Set anchor settings to match grid settings.\n # Grid size for anchors is half the resolution.\n anchor_params.grid_size = (cls.GRID_X // output_stride,\n cls.GRID_Y // output_stride, cls.GRID_Z)\n anchor_params.grid_range_x = cls.GRID_X_RANGE\n anchor_params.grid_range_y = cls.GRID_Y_RANGE\n # Grid along z axis should be pinned to 0.\n anchor_params.grid_range_z = (0, 0)\n\n\ndef MakeGridSettings(grid_x_range, grid_y_range, grid_z_range, grid_x, grid_y,\n grid_z):\n \"\"\"Returns configured class for PointPillar grid settings.\"\"\"\n\n class GridSettings(_PointPillarGridSettings):\n GRID_X_RANGE = grid_x_range\n GRID_Y_RANGE = grid_y_range\n GRID_Z_RANGE = grid_z_range\n GRID_X = grid_x\n GRID_Y = grid_y\n GRID_Z = grid_z\n\n return GridSettings\n\n\nPointPillarGridCarSettings = MakeGridSettings(\n grid_x_range=(0, 69.12),\n grid_y_range=(-39.68, 39.68),\n grid_z_range=(-3, 1),\n grid_x=432,\n grid_y=496,\n grid_z=1)\n\nPointPillarGridPedCycSettings = MakeGridSettings(\n grid_x_range=(0, 47.36),\n grid_y_range=(-19.84, 19.84),\n grid_z_range=(-2.5, 0.5),\n grid_x=432,\n grid_y=496,\n grid_z=1)\n\n\nclass GridToPillars(Preprocessor):\n \"\"\"Create pillars from a grid of points.\n\n Expects features to have keys:\n grid_centers: [gx, gy, gz, 3]\n\n grid_num_points: [gx, gy, gz]\n\n laser_grid: [gx, gy, gz, num_points_per_cell, F]\n\n Adds the following features:\n point_count: [num_pillars]. The number of points in the pillar.\n\n point_locations: [num_pillars, 3]. The grid location of each pillar.\n\n pillar_points: [num_pillars, num_points_per_cell, F]. Points of each\n pillar.\n\n Drops the following features by default:\n laser_grid\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_points_per_cell', 100,\n 'The maximum number of points per cell.')\n p.Define('num_pillars', 12000, 'The maximum number of pillars to produce.')\n p.Define('drop_laser_grid', True, 'Whether to drop the laser_grid feature.')\n # The density based sampler is more expensive.\n p.Define('use_density_sampler', False,\n 'Use a density based sampler during pillar selection.')\n return p\n\n def _GumbelTransform(self, probs):\n \"\"\"Adds gumbel noise to log probabilities for multinomial sampling.\n\n This enables fast sampling from a multinomial distribution without\n replacement. See https://arxiv.org/abs/1611.01144 for details.\n A colab that demonstrates this in practice is here:\n http://colab/drive/1iuMt2n_r7dKPQG9T0UVMuK3fkbBayKjd\n\n Args:\n probs: A 1-D float tensor containing probabilities, summing to 1.\n\n Returns:\n A 1-D float tensor of the same size of probs, with gumbel noise added to\n log probabilities. 
Taking the top k elements from this provides a\n multinomial sample without replacement.\n \"\"\"\n p = self.params\n log_prob = tf.math.log(probs)\n probs_shape = tf.shape(probs)\n uniform_samples = tf.random.uniform(\n shape=probs_shape,\n dtype=probs.dtype,\n seed=p.random_seed,\n name='uniform_samples')\n gumbel_noise = -tf.math.log(-tf.math.log(uniform_samples))\n return gumbel_noise + log_prob\n\n def _DensitySample(self, num_points):\n p = self.params\n\n # Flatten to [nx * ny * nz] for convenience during sampling.\n num_grid_points = np.prod(p.grid_size)\n flattened_num_points = tf.reshape(num_points, [num_grid_points])\n\n # Normalize flattened_num_points to sum to 1.\n flattened_num_points = tf.cast(flattened_num_points, tf.float32)\n flattened_num_points /= tf.reduce_sum(flattened_num_points)\n\n # TODO(jngiam): Consider generalizing this to enable other methods of\n # sampling: e.g., use largest deviation in z-axis. The gumbel transform\n # can still be applied regardless.\n\n # Add gumbel noise for multinomial sampling.\n sampling_logits = self._GumbelTransform(flattened_num_points)\n _, locations = tf.nn.top_k(\n sampling_logits, k=min(p.num_pillars, num_grid_points))\n\n # Unravel coordinates back to grid locations.\n locations = tf.unravel_index(locations, p.grid_size)\n\n # Unravel index will return a 3 x num_locations tensor, this needs to be\n # transposed so that we have it as num_locations x 3.\n locations = py_utils.HasShape(locations, [3, -1])\n locations = tf.transpose(locations)\n\n return locations\n\n def TransformFeatures(self, features):\n p = self.params\n\n num_points = features.grid_num_points\n if p.use_density_sampler:\n locations = self._DensitySample(num_points)\n else:\n # Select non-empty cells uniformly at random.\n locations = tf.random.shuffle(tf.cast(tf.where(num_points > 0), tf.int32))\n\n num_features = py_utils.GetShape(features.laser_grid)[-1]\n\n # [nx, ny, nz, np, 4] (x, y, z, f)\n points = features.laser_grid\n # [K, np, 4] (x, y, z, f)\n points = tf.gather_nd(points, locations)\n # [nx, ny, nz, 1, 3] (cx, cy, cz)\n centers = features.grid_centers[..., tf.newaxis, :]\n # [K, 1, 3] (cx, cy, cz)\n centers = tf.gather_nd(centers, locations)\n # NOTE: If there are fewer pillars than p.num_pillars, the following\n # padding creates many 'fake' pillars at grid cell (0, 0, 0) with\n # an all-zero pillar. 
Hopefully, the model can learn to ignore these.\n #\n # pillar_points[i, :, :] is the pillar located at pillar_locations[i, :3],\n # and pillar_points[i, :, :] == points_grid_full[pillar_locations[i, :3]].\n # for 0 <= i < pillar_count;\n # pillar_locations[i, :3] are zero-ed, for i >= pillar_count.\n features.pillar_count = tf.shape(locations)[0]\n features.pillar_locations = py_utils.PadOrTrimTo(locations,\n [p.num_pillars, 3])\n features.pillar_points = py_utils.PadOrTrimTo(\n points, [p.num_pillars, p.num_points_per_cell, num_features])\n features.pillar_centers = py_utils.PadOrTrimTo(centers,\n [p.num_pillars, 1, 3])\n\n if p.drop_laser_grid:\n del features['laser_grid']\n\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n num_features = shapes.laser_grid[-1]\n shapes.pillar_count = tf.TensorShape([])\n shapes.pillar_locations = tf.TensorShape([p.num_pillars, 3])\n shapes.pillar_points = tf.TensorShape(\n [p.num_pillars, p.num_points_per_cell, num_features])\n shapes.pillar_centers = tf.TensorShape([p.num_pillars, 1, 3])\n if p.drop_laser_grid:\n del shapes['laser_grid']\n return shapes\n\n def TransformDTypes(self, dtypes):\n p = self.params\n dtypes.pillar_count = tf.int32\n dtypes.pillar_locations = tf.int32\n dtypes.pillar_points = tf.float32\n dtypes.pillar_centers = tf.float32\n if p.drop_laser_grid:\n del dtypes['laser_grid']\n return dtypes\n\n\nclass GridAnchorCenters(Preprocessor):\n \"\"\"Create anchor centers on a grid.\n\n Anchors are placed in the middle of each grid cell. For example, on a 2D grid\n range (0 -> 10, 0 -> 10) with a 10 x 5 grid size, the anchors will be placed\n at [(0.5, 1), (0.5, 3), ... , (9.5, 7), (9.5, 9)].\n\n Adds the following features:\n anchor_centers: [num_locations, 3] - Floating point output containing the\n center (x, y, z) locations for tiling anchor boxes.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'grid_size', (20, 20, 1), 'Grid size along x,y,z axis. This will '\n 'be used to generate the anchor center locations. Note that this '\n 'would likely be different from the grid_* parameters in '\n 'LaserGridExtractor: the grid extractor may choose to extract '\n 'points more densely. 
Instead, this should correspond to the '\n 'model\\'s prediction layer: the predicted anchor box residuals '\n 'should match this grid.')\n p.Define('grid_range_x', (-25, 25), 'The x-axis range covered by the grid.')\n p.Define('grid_range_y', (-25, 25), 'The y-axis range covered by the grid.')\n p.Define('grid_range_z', (0, 0), 'The z-axis range covered by the grid.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n utils_3d = detection_3d_lib.Utils3D()\n\n # Compute the grid cell size and adjust the range sent to dense coordinates\n # by half a cell size so as to ensure that the anchors are placed in the\n # center of each grid cell.\n grid_size_x, grid_size_y, grid_size_z = p.grid_size\n grid_cell_sizes = [\n float(p.grid_range_x[1] - p.grid_range_x[0]) / grid_size_x,\n float(p.grid_range_y[1] - p.grid_range_y[0]) / grid_size_y,\n float(p.grid_range_z[1] - p.grid_range_z[0]) / grid_size_z,\n ]\n half_size_x, half_size_y, half_size_z = np.asarray(grid_cell_sizes) / 2.0\n\n grid_shape = list(p.grid_size) + [3]\n anchor_centers = utils_3d.CreateDenseCoordinates([\n [\n p.grid_range_x[0] + half_size_x,\n p.grid_range_x[1] - half_size_x,\n grid_size_x\n ],\n [\n p.grid_range_y[0] + half_size_y,\n p.grid_range_y[1] - half_size_y,\n grid_size_y\n ],\n [\n p.grid_range_z[0] + half_size_z,\n p.grid_range_z[1] - half_size_z,\n grid_size_z\n ],\n ]) # pyformat: disable\n features.anchor_centers = tf.reshape(anchor_centers, grid_shape)\n\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n shapes.anchor_centers = tf.TensorShape(list(p.grid_size) + [3])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.anchor_centers = tf.float32\n return dtypes\n\n\nclass SparseCenterSelector(Preprocessor):\n \"\"\"Select centers for anchors and cells.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n If lasers.num_seeded_points of shape [] is provided, it indicates that the\n first num_seeded_points of lasers.points_xyz should be used as seeds for\n farthest point sampling (e.g., always chosen). Currently the concept\n of seeding is not implemented for anything but farthest point sampling.\n\n Adds the following features:\n anchor_centers: [num_cell_centers, 3] - Floating point output containing the\n center (x, y, z) locations for tiling anchor boxes.\n cell_center_xyz: [num_cell_centers, 3] - Floating point output containing\n the center (x, y, z) locations for each cell to featurize.\n \"\"\"\n\n _SAMPLING_METHODS = ['farthest_point', 'random_uniform']\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_cell_centers', 256, 'Number of centers.')\n p.Define(\n 'features_preparation_layers', [],\n 'A list of Params for layers to run on the features before '\n 'performing farthest point sampling. For example, one may wish to '\n 'drop points out of frustum for KITTI before selecting centers. '\n 'Note that these layers will not mutate the original features, '\n 'instead, a copy will be made.')\n p.Define(\n 'sampling_method', 'farthest_point',\n 'Which sampling method to use. 
One of {}'.format(cls._SAMPLING_METHODS))\n p.Define(\n 'fix_z_to_zero', True, 'Whether to fix z to 0 when retrieving the '\n 'center xyz coordinates.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n\n if p.sampling_method not in self._SAMPLING_METHODS:\n raise ValueError('Param `sampling_method` must be one of {}.'.format(\n self._SAMPLING_METHODS))\n if p.features_preparation_layers is not None:\n self.CreateChildren('features_preparation_layers',\n p.features_preparation_layers)\n\n def _FarthestPointSampleCenters(self, points_xyz, num_seeded_points):\n \"\"\"Samples centers with Farthest Point Sampling.\n\n Args:\n points_xyz: An unpadded tf.float32 Tensor of shape [P, 3] with per point\n (x, y, z) locations. We expect any padded points to be removed before\n this function is called.\n num_seeded_points: integer indicating how many of the first\n num_seeded_points points in points_xyz should be considered\n as seeds for FPS (always chosen).\n\n Returns:\n A tf.float32 Tensor of shape [p.num_cell_centers, 3] with selected centers\n to use as anchors.\n \"\"\"\n p = self.params\n num_points = tf.shape(points_xyz)[0]\n points_padding = tf.zeros((num_points,), dtype=tf.float32)\n padded_num_points = tf.maximum(num_points, p.num_cell_centers)\n\n # Pad both the points and padding if for some reason the input pointcloud\n # has less points than p.num_cell_centers.\n points_xy = py_utils.PadOrTrimTo(points_xyz[:, :2], [padded_num_points, 2])\n points_padding = py_utils.PadOrTrimTo(\n points_padding, [padded_num_points], pad_val=1.0)\n\n sampled_idx, _ = car_lib.FarthestPointSampler(\n points_xy[tf.newaxis, ...],\n points_padding[tf.newaxis, ...],\n p.num_cell_centers,\n num_seeded_points=num_seeded_points,\n random_seed=p.random_seed)\n sampled_idx = sampled_idx[0, :]\n\n # Gather centers.\n if p.fix_z_to_zero:\n centers = tf.concat([\n tf.gather(points_xy, sampled_idx),\n tf.zeros((p.num_cell_centers, 1)),\n ], axis=-1) # pyformat: disable\n else:\n centers = tf.gather(points_xyz, sampled_idx)\n\n return centers\n\n def _RandomUniformSampleCenters(self, points_xyz):\n \"\"\"Samples centers with Random Uniform Sampling.\n\n Args:\n points_xyz: An unpadded tf.float32 Tensor of shape [P, 3] with per point\n (x, y, z) locations. 
We expect any padded points to be removed before\n this function is called.\n\n Returns:\n A tf.float32 Tensor of shape [p.num_cell_centers, 3] with selected centers\n to use as anchors.\n \"\"\"\n p = self.params\n # We want the center Z value to be 0 so just exclude it\n centers_xy = tf.random.shuffle(points_xyz[:, :2], seed=p.random_seed)\n selected_centers_xy = py_utils.PadOrTrimTo(centers_xy,\n [p.num_cell_centers, 2])\n return tf.concat([selected_centers_xy,\n tf.zeros((p.num_cell_centers, 1))],\n axis=-1)\n\n def _SampleCenters(self, points_xyz, num_seeded_points):\n p = self.params\n if p.sampling_method == 'farthest_point':\n return self._FarthestPointSampleCenters(points_xyz, num_seeded_points)\n elif p.sampling_method == 'random_uniform':\n if num_seeded_points > 0:\n raise NotImplementedError(\n 'Random sampling with seeded points not yet implemented.')\n return self._RandomUniformSampleCenters(points_xyz)\n else:\n raise ValueError('Param `sampling_method` must be one of {}.'.format(\n self._SAMPLING_METHODS))\n\n def TransformFeatures(self, features):\n p = self.params\n\n prepared_features = features.DeepCopy()\n for prep_layer in self.features_preparation_layers:\n prepared_features = prep_layer.FPropDefaultTheta(prepared_features)\n\n num_seeded_points = prepared_features.lasers.get('num_seeded_points', 0)\n points_data = prepared_features.lasers\n\n points_xyz = points_data.points_xyz\n if 'points_padding' in points_data:\n points_padding = points_data.points_padding\n points_mask = 1 - points_padding\n points_xyz = tf.boolean_mask(points_xyz, points_mask)\n\n centers = self._SampleCenters(points_xyz, num_seeded_points)\n centers = py_utils.HasShape(centers, [p.num_cell_centers, 3])\n\n features.anchor_centers = centers\n features.cell_center_xyz = centers\n\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n shapes.anchor_centers = tf.TensorShape([p.num_cell_centers, 3])\n shapes.cell_center_xyz = tf.TensorShape([p.num_cell_centers, 3])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.anchor_centers = tf.float32\n dtypes.cell_center_xyz = tf.float32\n return dtypes\n\n\nclass SparseCellGatherFeatures(Preprocessor):\n \"\"\"Select local features for each cell.\n\n This preprocessor expects features to contain:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, F]\n - cell_center_xyz of shape [C, 3]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n Adds the following features:\n cell_points_xyz: [num_centers, num_points_per_cell, 3] - Floating point\n output containing the (x, y, z) locations for each point for a given\n center.\n cell_feature: [num_centers, num_points_per_cell, F] - Floating point output\n containing the features for each point for a given center.\n cell_points_padding: [num_centers, num_points_per_cell] - 0/1 padding\n for the points in each cell.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_points_per_cell', 128, 'The number of points per cell.')\n p.Define('max_distance', 3.0, 'Max distance of point to cell center.')\n p.Define(\n 'sample_neighbors_uniformly', False,\n 'Whether to sample the neighbor points for every cell center '\n 'uniformly at random. 
If False, this will default to selecting by '\n 'distance.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n\n num_centers = py_utils.GetShape(features.cell_center_xyz, 1)[0]\n num_features = py_utils.GetShape(features.lasers.points_feature)[-1]\n\n points_xyz = features.lasers.points_xyz\n points_feature = features.lasers.points_feature\n if 'points_padding' in features.lasers:\n points_mask = 1 - features.lasers.points_padding\n points_xyz = tf.boolean_mask(points_xyz, points_mask)\n points_feature = tf.boolean_mask(points_feature, points_mask)\n\n # Note: points_xyz and points_feature must be unpadded as we pass\n # padding=None to neighborhood indices. Ensuring that they are unpadded\n # helps improve performance.\n\n # Get nearby points using kNN.\n sample_indices, sample_indices_padding = car_lib.NeighborhoodIndices(\n tf.expand_dims(points_xyz, 0),\n tf.expand_dims(features.cell_center_xyz, 0),\n p.num_points_per_cell,\n points_padding=None,\n max_distance=p.max_distance,\n sample_neighbors_uniformly=p.sample_neighbors_uniformly)\n\n # Take the first example since NeighborhoodIndices expects a batch dimension.\n sample_indices = sample_indices[0, :, :]\n sample_indices_padding = sample_indices_padding[0, :, :]\n\n sample_indices = py_utils.HasShape(sample_indices,\n [num_centers, p.num_points_per_cell])\n\n cell_points_xyz = tf.gather(points_xyz, sample_indices)\n cell_points_xyz = py_utils.HasShape(cell_points_xyz,\n [num_centers, p.num_points_per_cell, 3])\n\n cell_feature = tf.gather(points_feature, sample_indices)\n cell_feature = py_utils.HasShape(\n cell_feature, [num_centers, p.num_points_per_cell, num_features])\n\n cell_points_padding = py_utils.HasShape(\n sample_indices_padding, [num_centers, p.num_points_per_cell])\n\n features.update({\n 'cell_points_xyz': cell_points_xyz,\n 'cell_feature': cell_feature,\n 'cell_points_padding': cell_points_padding,\n })\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n num_centers = shapes.cell_center_xyz[0]\n base_shape = [num_centers, p.num_points_per_cell]\n num_features = shapes.lasers.points_feature[-1]\n shapes.cell_points_xyz = tf.TensorShape(base_shape + [3])\n shapes.cell_feature = tf.TensorShape(base_shape + [num_features])\n shapes.cell_points_padding = tf.TensorShape(base_shape)\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.cell_points_xyz = tf.float32\n dtypes.cell_feature = tf.float32\n dtypes.cell_points_padding = tf.float32\n return dtypes\n\n\nclass SparseCellCentersTopK(Preprocessor):\n \"\"\"Given selected centers and gathered points/features, apply a filter.\n\n This preprocessor expects features to contain `cell_center_xyz` and all\n entries in params.features_to_modify, and that the leading dimension should\n all be the same (num_cell_centers from SparseCenterSelector).\n\n We then modify all values in features that are specified in\n params.features_to_modify by sorting them with the specified sort function\n (specified by params.sort_by) operating on features.cell_center_xyz, and then\n taking the top K (specified by params.num_cell_centers) along the first\n dimension.\n \"\"\"\n\n _REGISTERED_SORT_FUNCTIONS = ['distance']\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_cell_centers', 512, 'The number of centers after filtering.')\n p.Define(\n 'sort_by', 'distance', 'A string specifying which sort function '\n 'to use. 
Currently we just support `distance`.')\n p.Define('features_to_modify', [\n 'cell_center_xyz', 'anchor_centers', 'cell_points_xyz', 'cell_feature',\n 'cell_points_padding'\n ], 'A list of keys from the features dict to modify.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if p.sort_by not in self._REGISTERED_SORT_FUNCTIONS:\n raise ValueError('{} not supported. We only support {}.'.format(\n p.sort_by, self._REGISTERED_SORT_FUNCTIONS))\n if len(p.features_to_modify) < 1:\n raise ValueError('Need to modify at least one feature.')\n\n def _SortByDistance(self, features):\n dist = tf.linalg.norm(features.cell_center_xyz, axis=-1)\n return tf.argsort(dist, axis=-1, direction='ASCENDING')\n\n def _Sort(self, features):\n p = self.params\n if p.sort_by == 'distance':\n return self._SortByDistance(features)\n else:\n raise ValueError('Unsupported sort function: {}.'.format(p.sort_by))\n\n def TransformFeatures(self, features):\n p = self.params\n sort_indices = self._Sort(features)\n sort_indices_top_k = sort_indices[:p.num_cell_centers, ...]\n\n # Gather each of the relevant items\n for key in p.features_to_modify:\n shape = py_utils.GetShape(features[key])\n output_shape = [p.num_cell_centers] + shape[1:]\n features[key] = py_utils.PadOrTrimTo(\n tf.gather(features[key], sort_indices_top_k), output_shape)\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n for key in p.features_to_modify:\n shapes[key] = tf.TensorShape([p.num_cell_centers] + shapes[key][1:])\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass TileAnchorBBoxes(Preprocessor):\n \"\"\"Creates anchor_bboxes given anchor_centers.\n\n This preprocessor expects features to contain the following keys:\n - anchor_centers of shape [...base shape..., 3]\n\n Adds the following features:\n anchor_bboxes: base_shape + [7] - Floating point anchor box\n output containing the anchor boxes and the 7 floating point\n values for each box that define the box (x, y, z, dx, dy, dz, phi).\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('anchor_box_dimensions', [],\n 'List of anchor box sizes per center.')\n p.Define('anchor_box_offsets', [], 'List of anchor box offsets per center.')\n p.Define('anchor_box_rotations', [],\n 'List of anchor box rotations per center.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n utils_3d = detection_3d_lib.Utils3D()\n\n assert p.anchor_box_dimensions\n assert p.anchor_box_offsets\n assert p.anchor_box_rotations\n\n base_shape = py_utils.GetShape(features.anchor_centers)[:-1]\n num_box_per_center = len(p.anchor_box_dimensions)\n\n anchor_centers = tf.reshape(features.anchor_centers, [-1, 3])\n anchor_bboxes = utils_3d.MakeAnchorBoxes(\n anchor_centers, tf.identity(p.anchor_box_dimensions),\n tf.identity(p.anchor_box_offsets), tf.identity(p.anchor_box_rotations))\n features.anchor_bboxes = tf.reshape(anchor_bboxes,\n base_shape + [num_box_per_center, 7])\n\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n base_shape = shapes.anchor_centers[:-1]\n num_box_per_center = len(p.anchor_box_dimensions)\n shapes.anchor_bboxes = base_shape.concatenate([num_box_per_center, 7])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.anchor_bboxes = tf.float32\n return dtypes\n\n\nclass _AnchorBoxSettings:\n \"\"\"Helper class to parameterize and update anchor box settings.\"\"\"\n # Implementations should fill out the following class 
members.\n DIMENSION_PRIORS = []\n ROTATIONS = []\n CENTER_X_OFFSETS = []\n CENTER_Y_OFFSETS = []\n CENTER_Z_OFFSETS = []\n\n @classmethod\n def NumAnchors(cls):\n return np.prod([\n len(cls.DIMENSION_PRIORS),\n len(cls.ROTATIONS),\n len(cls.CENTER_X_OFFSETS),\n len(cls.CENTER_Y_OFFSETS),\n len(cls.CENTER_Z_OFFSETS)\n ])\n\n @classmethod\n def GenerateAnchorSettings(cls):\n \"\"\"Generate anchor settings.\n\n Returns:\n A `NestedMap` containing three lists of the same length:\n - anchor_box_dimensions\n - anchor_box_rotations\n - anchor_box_offsets\n\n These can be used with the TileAnchorBBoxes preprocessor.\n \"\"\"\n anchor_box_dimensions = []\n anchor_box_rotations = []\n anchor_box_offsets = []\n\n # The following is equivalent to a formulation of itertools.product, but\n # is explicitly listed for readability.\n\n # *Please note*: The ordering is important for ModelV2, which makes\n # assumptions that the offset dimensions come first.\n for cx in cls.CENTER_X_OFFSETS:\n for cy in cls.CENTER_Y_OFFSETS:\n for cz in cls.CENTER_Z_OFFSETS:\n for rot in cls.ROTATIONS:\n for dims in cls.DIMENSION_PRIORS:\n anchor_box_dimensions += [dims]\n anchor_box_rotations += [rot]\n anchor_box_offsets += [(cx, cy, cz)]\n\n # Check one of the lists has entries.\n assert anchor_box_dimensions\n\n return py_utils.NestedMap(\n anchor_box_dimensions=anchor_box_dimensions,\n anchor_box_rotations=anchor_box_rotations,\n anchor_box_offsets=anchor_box_offsets)\n\n @classmethod\n def Update(cls, params):\n \"\"\"Updates anchor box settings from input configuration lists.\n\n Given dimensions priors, rotations, and offsets, computes the cartesian\n product of the settings.\n\n Args:\n params: The KITTIAnchorExtractorBase.Params() object to update.\n\n Returns:\n Params updated with the anchor settings.\n\n In total there are N combinations, where each (anchor_box_dimensions[i],\n anchor_box_rotations[i], anchor_box_offsets[i]) for i in range(N) is an\n option.\n \"\"\"\n p = params\n settings = cls.GenerateAnchorSettings()\n p.anchor_box_dimensions = settings.anchor_box_dimensions\n p.anchor_box_rotations = settings.anchor_box_rotations\n p.anchor_box_offsets = settings.anchor_box_offsets\n return p\n\n\ndef MakeAnchorBoxSettings(dimension_priors, rotations, center_x_offsets,\n center_y_offsets, center_z_offsets):\n \"\"\"Returns a configured class for setting anchor box settings.\"\"\"\n\n class CustomAnchorBoxSettings(_AnchorBoxSettings):\n DIMENSION_PRIORS = dimension_priors\n ROTATIONS = rotations\n CENTER_X_OFFSETS = center_x_offsets\n CENTER_Y_OFFSETS = center_y_offsets\n CENTER_Z_OFFSETS = center_z_offsets\n\n return CustomAnchorBoxSettings\n\n\nclass SparseCarV1AnchorBoxSettings(_AnchorBoxSettings):\n \"\"\"Anchor box settings for training on Cars for Sparse models.\"\"\"\n # Borrowed from PointPillar dimension prior for cars.\n DIMENSION_PRIORS = [(1.6, 3.9, 1.56)]\n\n # 4 Rotations with axis aligned and both diagonals.\n ROTATIONS = [0, np.pi / 2, np.pi / 4, 3 * np.pi / 4]\n\n # 25 offsets per anchor box with fixed z offset at -1.\n CENTER_X_OFFSETS = np.linspace(-1.5, 1.5, 5)\n CENTER_Y_OFFSETS = np.linspace(-1.5, 1.5, 5)\n CENTER_Z_OFFSETS = [-1.]\n\n\nclass PointPillarAnchorBoxSettingsCar(_AnchorBoxSettings):\n DIMENSION_PRIORS = [(1.6, 3.9, 1.56)]\n ROTATIONS = [0, np.pi / 2]\n # Fixed offset for every anchor box, based on a reading of the paper / code\n # 0 offsets for x and y, and -1 for z.\n CENTER_X_OFFSETS = [0.]\n CENTER_Y_OFFSETS = [0.]\n CENTER_Z_OFFSETS = [-1.]\n\n\nclass 
PointPillarAnchorBoxSettingsPed(PointPillarAnchorBoxSettingsCar):\n DIMENSION_PRIORS = [(0.6, 0.8, 1.73)]\n CENTER_Z_OFFSETS = [-0.6]\n\n\nclass PointPillarAnchorBoxSettingsCyc(PointPillarAnchorBoxSettingsCar):\n DIMENSION_PRIORS = [(0.6, 1.76, 1.73)]\n CENTER_Z_OFFSETS = [-0.6]\n\n\nclass PointPillarAnchorBoxSettingsPedCyc(PointPillarAnchorBoxSettingsCar):\n DIMENSION_PRIORS = [(0.6, 0.8, 1.7), (0.6, 1.76, 1.73)]\n CENTER_Z_OFFSETS = [-0.6]\n\n\nclass AnchorAssignment(Preprocessor):\n \"\"\"Perform anchor assignment on the features.\n\n This preprocessor expects features to contain the following keys:\n - anchor_bboxes of shape [...base shape..., 7]\n - labels.bboxes_3d\n - labels.labels\n - labels.bboxes_3d_mask\n\n Adds the following features:\n\n anchor_localization_residuals: base_shape + [7] floating point tensor of\n residuals. The model is expected to regress against these residuals as\n targets. The residuals can be converted back into bboxes using\n detection_3d_lib.Utils3D.ResidualsToBBoxes.\n assigned_gt_idx: base_shape - The corresponding index of the ground\n truth bounding box for each anchor box in anchor_bboxes, anchors not\n assigned will have idx be set to -1.\n assigned_gt_bbox: base_shape + [7] - The corresponding ground\n truth bounding box for each anchor box in anchor_bboxes.\n assigned_gt_labels: base_shape - The assigned groundtruth label\n for each anchor box.\n assigned_gt_similarity_score: base_shape - The similarity score\n for each assigned anchor box.\n assigned_cls_mask: base_shape mask for classification loss per anchor.\n This should be 1.0 if the anchor has a foreground or background\n assignment; otherwise, it will be assigned to 0.0.\n assigned_reg_mask: base_shape mask for regression loss per anchor.\n This should be 1.0 if the anchor has a foreground assignment;\n otherwise, it will be assigned to 0.0.\n Note: background anchors do not have regression targets.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'foreground_assignment_threshold', 0.5,\n 'Score (usually IOU) threshold for assigning a box as foreground.')\n p.Define(\n 'background_assignment_threshold', 0.35,\n 'Score (usually IOU) threshold for assigning a box as background.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n utils_3d = detection_3d_lib.Utils3D()\n\n # anchor_bboxes will be returned with shape [#centers, #boxes_per_center, 7]\n # flatten boxes here for matching.\n base_shape = py_utils.GetShape(features.anchor_bboxes)[:-1]\n anchor_bboxes = tf.reshape(features.anchor_bboxes, [-1, 7])\n\n assigned_anchors = utils_3d.AssignAnchors(\n anchor_bboxes,\n features.labels.bboxes_3d,\n features.labels.labels,\n features.labels.bboxes_3d_mask,\n foreground_assignment_threshold=p.foreground_assignment_threshold,\n background_assignment_threshold=p.background_assignment_threshold)\n\n # Add new features.\n features.assigned_gt_idx = tf.reshape(assigned_anchors.assigned_gt_idx,\n base_shape)\n features.assigned_gt_bbox = tf.reshape(assigned_anchors.assigned_gt_bbox,\n base_shape + [7])\n features.assigned_gt_labels = tf.reshape(\n assigned_anchors.assigned_gt_labels, base_shape)\n features.assigned_gt_similarity_score = tf.reshape(\n assigned_anchors.assigned_gt_similarity_score, base_shape)\n features.assigned_cls_mask = tf.reshape(assigned_anchors.assigned_cls_mask,\n base_shape)\n features.assigned_reg_mask = tf.reshape(assigned_anchors.assigned_reg_mask,\n base_shape)\n\n # Compute residuals.\n 
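# (A hedged aside, not part of the original code: LocalizationResiduals in\n # detection_3d_lib computes an anchor-relative encoding; a common form,\n # stated for intuition only, is\n # dx = (x_gt - x_a) / sqrt(dx_a**2 + dy_a**2), ddims = log(dims_gt / dims_a),\n # dphi = phi_gt - phi_a.)\n 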
features.anchor_localization_residuals = utils_3d.LocalizationResiduals(\n features.anchor_bboxes, features.assigned_gt_bbox)\n\n return features\n\n def TransformShapes(self, shapes):\n base_shape = shapes.anchor_bboxes[:-1]\n box_shape = base_shape.concatenate([7])\n\n shapes.anchor_localization_residuals = box_shape\n shapes.assigned_gt_idx = base_shape\n shapes.assigned_gt_bbox = box_shape\n shapes.assigned_gt_labels = base_shape\n shapes.assigned_gt_similarity_score = base_shape\n shapes.assigned_cls_mask = base_shape\n shapes.assigned_reg_mask = base_shape\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.anchor_localization_residuals = tf.float32\n dtypes.assigned_gt_idx = tf.int32\n dtypes.assigned_gt_bbox = tf.float32\n dtypes.assigned_gt_labels = tf.int32\n dtypes.assigned_gt_similarity_score = tf.float32\n dtypes.assigned_cls_mask = tf.float32\n dtypes.assigned_reg_mask = tf.float32\n return dtypes\n\n\nclass DropLaserPointsOutOfRange(Preprocessor):\n \"\"\"Drops laser points that are out of pre-defined x/y/z ranges.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, F]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n Modifies the following features:\n Removes or sets padding to 1 for all points outside a given range. Modifies\n all items in the lasers subdictionary like lasers.points_xyz,\n lasers.points_feature, lasers.points_padding, and optionally\n lasers.points_label, lasers.points_bbox_id.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('keep_x_range', (-np.inf, np.inf),\n 'Only points that have x coordinates within this range are kept.')\n p.Define('keep_y_range', (-np.inf, np.inf),\n 'Only points that have y coordinates within this range are kept.')\n p.Define(\n 'keep_z_range', (-np.inf, np.inf),\n 'Only points that have z coordinates within this range are kept. '\n 'Approximate ground-removal can be performed by specifying a '\n 'lower-bound on the z-range.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n\n points_xyz = features.lasers.points_xyz\n if 'points_padding' in features.lasers:\n points_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)\n else:\n # All points are real, we keep points unpadded by applying boolean_mask\n # on points_mask later.\n points_mask = tf.ones_like(points_xyz[:, 0], dtype=tf.bool)\n\n min_x, max_x = p.keep_x_range\n min_y, max_y = p.keep_y_range\n min_z, max_z = p.keep_z_range\n\n # Short-circuit if all ranges are set to -inf, inf.\n if (np.all(np.isneginf([min_x, min_y, min_z])) and\n np.all(np.isposinf([max_x, max_y, max_z]))):\n return features\n\n if min_x != -np.inf:\n points_mask &= points_xyz[:, 0] >= min_x\n if min_y != -np.inf:\n points_mask &= points_xyz[:, 1] >= min_y\n if min_z != -np.inf:\n points_mask &= points_xyz[:, 2] >= min_z\n\n if max_x != np.inf:\n points_mask &= points_xyz[:, 0] <= max_x\n if max_y != np.inf:\n points_mask &= points_xyz[:, 1] <= max_y\n if max_z != np.inf:\n points_mask &= points_xyz[:, 2] <= max_z\n\n if 'points_padding' in features.lasers:\n # Suffices to just update the padding.\n features.lasers.points_padding = 1. 
- tf.cast(points_mask, tf.float32)\n else:\n features.lasers = features.lasers.Transform(\n _GetApplyPointMaskFn(points_mask))\n\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass KITTIDropPointsOutOfFrustum(Preprocessor):\n \"\"\"Drops laser points that are outside of the camera frustum.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, F]\n - images.velo_to_image_plane of shape [3, 4]\n - images.width of shape [1]\n - images.height of shape [1]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n Modifies the following features:\n lasers.points_xyz, lasers.points_feature, lasers.points_padding, and\n optionally lasers.points_label, lasers.points_bbox_id so that\n points outside the frustum have padding set to 1 or are removed.\n \"\"\"\n\n def TransformFeatures(self, features):\n # Drop points behind the car (behind x-axis = 0).\n images = features.images\n front_indices = features.lasers.points_xyz[:, 0] >= 0\n\n if 'points_padding' not in features.lasers:\n # Keep tensors unpadded and small using boolean_mask.\n features.lasers.points_xyz = tf.boolean_mask(features.lasers.points_xyz,\n front_indices)\n features.lasers.points_feature = tf.boolean_mask(\n features.lasers.points_feature, front_indices)\n\n # Drop those points outside the image plane.\n points_image = geometry.PointsToImagePlane(features.lasers.points_xyz,\n images.velo_to_image_plane)\n in_image_plane = (\n (points_image[:, 0] >= 0) &\n (points_image[:, 0] <= tf.cast(images.width, tf.float32)) &\n (points_image[:, 1] >= 0) &\n (points_image[:, 1] <= tf.cast(images.height, tf.float32)))\n\n if 'points_padding' in features.lasers:\n # Update padding to only include front indices and in image plane.\n points_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)\n points_mask &= front_indices\n points_mask &= in_image_plane\n features.lasers.points_padding = 1. - tf.cast(points_mask, tf.float32)\n else:\n features.lasers = features.lasers.Transform(\n _GetApplyPointMaskFn(in_image_plane))\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass RandomWorldRotationAboutZAxis(Preprocessor):\n \"\"\"Rotates the world randomly as a form of data augmentation.\n\n Rotations are performed around the *z-axis*. This assumes that the car is\n always level. In general, we'd like to instead rotate the car on the spot,\n this would then make sense for cases where the car is on a slope.\n\n When there are leading dimensions, this will rotate the boxes with the same\n transformation across all the frames. 
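(For reference, a z-rotation by angle r maps a point (x, y, z) via the\n standard 2D rotation -- stated here for intuition only:\n\n x' = x * cos(r) - y * sin(r)\n y' = x * sin(r) + y * cos(r)\n z' = z\n\n and, per the convention in this file, box headings become\n phi' = WrapAngleRad(phi - r).) 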
This is useful when the input is a\n sequence of frames from the same run segment.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [..., 3]\n - labels.bboxes_3d of shape [..., 7]\n\n Modifies the following features:\n lasers.points_xyz, labels.bboxes_3d with the same rotation applied to both.\n\n Adds the following features:\n world_rot_z which contains the rotation applied to the example.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'max_rotation', None,\n 'The rotation amount will be randomly picked from '\n '[-max_rotation, max_rotation).')\n p.Define(\n 'include_world_rot_z', True,\n 'Whether to include the applied rotation as an additional tensor. '\n 'It can be helpful to disable this when using the preprocessor in a '\n 'way that expects the structure of the features to be the same '\n '(e.g., as a branch in tf.cond).')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if p.max_rotation is None:\n raise ValueError('max_rotation needs to be specified, instead of None.')\n\n def TransformFeatures(self, features):\n p = self.params\n rot = tf.random.uniform((),\n minval=-p.max_rotation,\n maxval=p.max_rotation,\n seed=p.random_seed)\n\n # Rotating about the z-axis is equal to experiencing yaw.\n pose = [0., 0., 0., rot, 0., 0.]\n\n # Rotate points.\n features.lasers.points_xyz = geometry.CoordinateTransform(\n features.lasers.points_xyz, pose)\n\n # Rotate bboxes, note that heading has a special case.\n bboxes_xyz = features.labels.bboxes_3d[..., :3]\n bboxes_dims = features.labels.bboxes_3d[..., 3:6]\n bboxes_rot = features.labels.bboxes_3d[..., 6:]\n\n bboxes_xyz = geometry.CoordinateTransform(bboxes_xyz, pose)\n\n # The heading correction should subtract rot from the bboxes rotations.\n bboxes_rot = geometry.WrapAngleRad(bboxes_rot - rot)\n\n features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],\n axis=-1)\n if p.include_world_rot_z:\n features.world_rot_z = rot\n return features\n\n def TransformShapes(self, shapes):\n if self.params.include_world_rot_z:\n shapes.world_rot_z = tf.TensorShape([])\n return shapes\n\n def TransformDTypes(self, dtypes):\n if self.params.include_world_rot_z:\n dtypes.world_rot_z = tf.float32\n return dtypes\n\n\nclass DropPointsOutOfFrustum(Preprocessor):\n \"\"\"Drops points outside of pre-defined theta / phi ranges.\n\n Note that the ranges for keep_phi_range can be negative, this is because the\n phi values wrap around 2*pi. Thus, a valid range that filters the 90 deg\n frontal field of view of the car can be specified as [-pi/4, pi/4].\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 7]\n - lasers.points_feature of shape [P]\n\n Modifies the following features:\n - lasers.points_xyz removing any points out of frustum.\n - lasers.points_feature removing any points out of frustum.\n\n Note: We expect a downstream processor that filters out boxes with few points\n to drop the corresponding bboxes.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('keep_theta_range', (0., np.pi),\n 'Only points that have theta coordinates within this range.')\n p.Define('keep_phi_range', (0., 2. 
* np.pi),\n 'Only points that have phi coordinates within this range.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n\n if 'points_padding' in features.lasers:\n raise ValueError('DropPointsOutOfFrustum preprocessor does not support '\n 'padded lasers.')\n\n points_xyz = features.lasers.points_xyz\n points_feature = features.lasers.points_feature\n\n min_theta, max_theta = p.keep_theta_range\n if (min_theta < 0. or min_theta > np.pi or max_theta < 0. or\n max_theta > np.pi):\n raise ValueError('Valid values for theta are between 0 and pi, '\n 'keep_theta_range={}'.format(p.keep_theta_range))\n\n if min_theta > max_theta:\n raise ValueError('min_theta must be <= max_theta, '\n 'keep_theta_range={}'.format(p.keep_theta_range))\n\n min_phi, max_phi = p.keep_phi_range\n if (min_phi < -2. * np.pi or min_phi > 2. * np.pi or\n max_phi < -2. * np.pi or max_phi > 2. * np.pi):\n raise ValueError('Valid values for phi are between -2*pi and 2*pi, '\n 'keep_phi_range={}'.format(p.keep_phi_range))\n\n if min_phi > max_phi:\n raise ValueError('min_phi must be <= max_phi, '\n 'keep_phi_range={}'.format(p.keep_phi_range))\n\n _, theta, phi = tf.unstack(\n geometry.SphericalCoordinatesTransform(points_xyz), axis=-1)\n\n # phi is returned in the range [-pi, pi]; we shift the values which are\n # between [-pi, 0] to be [pi, 2pi] instead to make the logic below easier\n # to follow. Hence, all phi values after this will be in [0, 2pi].\n phi = tf.where(phi >= 0., phi, 2. * np.pi + phi)\n\n # Theta does not have circular boundary conditions, so a simple check\n # suffices.\n points_mask = (theta >= min_theta) & (theta <= max_theta)\n\n if min_phi < 0. and max_phi < 0.:\n # Both are less than zero, so we just add 2pi and use the regular\n # check.\n min_phi += 2. * np.pi\n max_phi += 2. * np.pi\n\n if min_phi < 0.:\n # The minimum threshold is below 0, so we split the check into\n # (2*pi + min_phi to 2*pi) and (0 to max_phi). Note that min_phi is\n # negative, but phi is always positive, so we take 2*pi + min_phi to get\n # the range of appropriate values.\n points_mask &= (phi >= (2. 
* np.pi + min_phi)) | (phi <= max_phi)\n else:\n # Both must be greater than 0 if we get to this condition.\n assert min_phi >= 0.\n assert max_phi >= 0.\n points_mask &= (phi >= min_phi) & (phi <= max_phi)\n\n features.lasers.points_xyz = tf.boolean_mask(points_xyz, points_mask)\n features.lasers.points_feature = tf.boolean_mask(points_feature,\n points_mask)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass DropBoxesOutOfRange(Preprocessor):\n \"\"\"Drops boxes outside of pre-defined x/y/z ranges (boundaries inclusive).\n\n This preprocessor expects features to contain the following keys:\n - labels.bboxes_3d of shape [N, 7]\n - labels.bboxes_3d_mask of shape [N]\n\n Modifies the following features:\n - labels.bboxes_3d_mask to mask out any additional boxes.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('keep_x_range', (-np.inf, np.inf),\n 'Only boxes that have x coordinates within this range are kept.')\n p.Define('keep_y_range', (-np.inf, np.inf),\n 'Only boxes that have y coordinates within this range are kept.')\n p.Define('keep_z_range', (-np.inf, np.inf),\n 'Only boxes that have z coordinates within this range are kept.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n\n min_x, max_x = p.keep_x_range\n min_y, max_y = p.keep_y_range\n min_z, max_z = p.keep_z_range\n\n # Short-circuit if all ranges are set to -inf, inf.\n if (np.all(np.isneginf([min_x, min_y, min_z])) and\n np.all(np.isposinf([max_x, max_y, max_z]))):\n return features\n\n # For each bounding box, compute whether any of its extrema\n # fall outside of the range.\n bboxes_3d_corners = geometry.BBoxCorners(\n features.labels.bboxes_3d[tf.newaxis, ...])[0]\n bboxes_3d_corners = py_utils.HasShape(bboxes_3d_corners, [-1, 8, 3])\n\n min_bbox_x = tf.reduce_min(bboxes_3d_corners[:, :, 0], axis=-1)\n max_bbox_x = tf.reduce_max(bboxes_3d_corners[:, :, 0], axis=-1)\n\n min_bbox_y = tf.reduce_min(bboxes_3d_corners[:, :, 1], axis=-1)\n max_bbox_y = tf.reduce_max(bboxes_3d_corners[:, :, 1], axis=-1)\n\n min_bbox_z = tf.reduce_min(bboxes_3d_corners[:, :, 2], axis=-1)\n max_bbox_z = tf.reduce_max(bboxes_3d_corners[:, :, 2], axis=-1)\n\n mask = (\n tf.math.logical_and(min_bbox_x >= min_x, max_bbox_x <= max_x)\n & tf.math.logical_and(min_bbox_y >= min_y, max_bbox_y <= max_y)\n & tf.math.logical_and(min_bbox_z >= min_z, max_bbox_z <= max_z))\n\n max_num_boxes = py_utils.GetShape(features.labels.bboxes_3d_mask)\n mask = py_utils.HasShape(mask, max_num_boxes)\n\n features.labels.bboxes_3d_mask *= tf.cast(mask, tf.float32)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass PadLaserFeatures(Preprocessor):\n \"\"\"Pads laser features so that the dimensions are fixed.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, F]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n Modifies the following features:\n lasers.points_xyz and lasers.points_feature to add padding.\n Optionally also modifies lasers.points_label and lasers.points_bbox_id\n if they exist to add padding.\n Modifies/adds the following features:\n labels.points_padding of shape [P] representing the padding.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = 
super().Params()\n p.Define('max_num_points', 128500,\n 'Max number of points to pad the points to.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n\n if 'points_padding' in features.lasers:\n points_mask = 1 - features.lasers.points_padding\n points_mask = tf.cast(points_mask, tf.bool)\n features.lasers = features.lasers.Transform(\n _GetApplyPointMaskFn(points_mask))\n\n npoints = tf.shape(features.lasers.points_xyz)[0]\n features.lasers.points_padding = tf.ones([npoints])\n\n shuffled_idx = tf.range(npoints)\n shuffled_idx = tf.random.shuffle(shuffled_idx, seed=p.random_seed)\n\n def _PadOrTrimFn(points_tensor):\n # Shuffle before trimming so we have a random sampling\n points_tensor = tf.gather(points_tensor, shuffled_idx)\n return py_utils.PadOrTrimTo(points_tensor, [p.max_num_points] +\n points_tensor.shape[1:].as_list())\n\n features.lasers = features.lasers.Transform(_PadOrTrimFn)\n features.lasers.points_padding = 1.0 - features.lasers.points_padding\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n\n def _TransformShape(points_shape):\n return tf.TensorShape([p.max_num_points] + points_shape[1:].as_list())\n\n shapes.lasers = shapes.lasers.Transform(_TransformShape)\n shapes.lasers.points_padding = tf.TensorShape([p.max_num_points])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.lasers.points_padding = tf.float32\n return dtypes\n\n\nclass WorldScaling(Preprocessor):\n \"\"\"Scale the world randomly as a form of data augmentation.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - labels.bboxes_3d of shape [L, 7]\n\n Modifies the following features:\n lasers.points_xyz, labels.bboxes_3d with the same scaling applied to both.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('scaling', None, 'The scaling range.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if p.scaling is None:\n raise ValueError('scaling needs to be specified, instead of None.')\n if len(p.scaling) != 2:\n raise ValueError('scaling needs to be a list of two elements.')\n\n def TransformFeatures(self, features):\n p = self.params\n scaling = tf.random.uniform((),\n minval=p.scaling[0],\n maxval=p.scaling[1],\n seed=p.random_seed,\n dtype=features.lasers.points_xyz.dtype)\n\n # Scale points [num_points, 3].\n features.lasers.points_xyz *= scaling\n\n # Scaling bboxes (location and dimensions).\n bboxes_xyz = features.labels.bboxes_3d[..., :3] * scaling\n bboxes_dims = features.labels.bboxes_3d[..., 3:6] * scaling\n bboxes_rot = features.labels.bboxes_3d[..., 6:]\n\n features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],\n axis=-1)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass RandomDropLaserPoints(Preprocessor):\n \"\"\"Randomly dropout laser points and the corresponding features.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, K]\n\n\n Modifies the following features:\n lasers.points_xyz, lasers.points_feature.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('keep_prob', 0.95, 'Probability for keeping points.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n num_points, _ = py_utils.GetShape(features.lasers.points_xyz)\n\n pts_keep_sample_prob = 
tf.random.uniform([num_points],\n minval=0,\n maxval=1,\n seed=p.random_seed)\n pts_keep_mask = pts_keep_sample_prob < p.keep_prob\n\n if 'points_padding' in features.lasers:\n # Update points_padding so that where pts_keep_mask is True,\n # points_padding remains 0.\n points_mask = 1 - features.lasers.points_padding\n points_mask *= tf.cast(pts_keep_mask, tf.float32)\n features.lasers.points_padding = 1 - points_mask\n else:\n features.lasers.points_xyz = tf.boolean_mask(features.lasers.points_xyz,\n pts_keep_mask)\n features.lasers.points_feature = tf.boolean_mask(\n features.lasers.points_feature, pts_keep_mask)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass RandomFlipY(Preprocessor):\n \"\"\"Flip the world along axis Y as a form of data augmentation.\n\n When there are leading dimensions, this will flip the boxes with the same\n transformation across all the frames. This is useful when the input is a\n sequence of frames from the same run segment.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [..., 3]\n - labels.bboxes_3d of shape [..., 7]\n\n Modifies the following features:\n lasers.points_xyz, labels.bboxes_3d with the same flipping applied to both.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('flip_probability', 0.5, 'Probability of flipping.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n threshold = 1. - p.flip_probability\n choice = tf.random.uniform(\n (), minval=0.0, maxval=1.0, seed=p.random_seed) >= threshold\n\n # Flip points\n points_xyz = features.lasers.points_xyz\n points_y = tf.where(choice, -points_xyz[..., 1:2], points_xyz[..., 1:2])\n features.lasers.points_xyz = tf.concat(\n [points_xyz[..., 0:1], points_y, points_xyz[..., 2:3]], axis=-1)\n\n # Flip boxes\n bboxes_xyz = features.labels.bboxes_3d[..., :3]\n bboxes_y = tf.where(choice, -bboxes_xyz[..., 1:2], bboxes_xyz[..., 1:2])\n bboxes_xyz = tf.concat(\n [bboxes_xyz[..., 0:1], bboxes_y, bboxes_xyz[..., 2:3]], axis=-1)\n # Compensate rotation.\n bboxes_dims = features.labels.bboxes_3d[..., 3:6]\n bboxes_rot = features.labels.bboxes_3d[..., 6:]\n bboxes_rot = tf.where(choice, geometry.WrapAngleRad(-bboxes_rot),\n bboxes_rot)\n features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],\n axis=-1)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass GlobalTranslateNoise(Preprocessor):\n \"\"\"Add global translation noise of xyz coordinates to points and boxes.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - labels.bboxes_3d of shape [L, 7]\n\n Modifies the following features:\n lasers.points_xyz, labels.bboxes_3d with the same\n random translation noise applied to both.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('noise_std', [0.2, 0.2, 0.2],\n 'Standard deviation of translation noise per axis.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n # Use three different seeds but the same base seed so\n # that the values are different.\n base_seed = p.random_seed\n x_seed = base_seed\n y_seed = None if base_seed is None else base_seed + 1\n z_seed = None if base_seed is None else base_seed + 2\n random_translate_x = tf.random.normal((),\n mean=0.0,\n stddev=p.noise_std[0],\n seed=x_seed)\n 
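# (Aside, added for clarity: the derived y/z seeds matter. If the same\n # op-level seed were reused for all three axes, the draws would typically\n # be identical when p.random_seed is fixed, collapsing the noise onto the\n # diagonal.)\n 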
random_translate_y = tf.random.normal((),\n mean=0.0,\n stddev=p.noise_std[1],\n seed=y_seed)\n random_translate_z = tf.random.normal((),\n mean=0.0,\n stddev=p.noise_std[2],\n seed=z_seed)\n\n pose = tf.stack([\n random_translate_x, random_translate_y, random_translate_z, 0.0, 0.0,\n 0.0\n ],\n axis=0)\n\n # Translate points.\n points_xyz = features.lasers.points_xyz\n features.lasers.points_xyz = geometry.CoordinateTransform(points_xyz, pose)\n\n # Translate boxes\n bboxes_xyz = features.labels.bboxes_3d[..., :3]\n bboxes_xyz = geometry.CoordinateTransform(bboxes_xyz, pose)\n features.labels.bboxes_3d = tf.concat(\n [bboxes_xyz, features.labels.bboxes_3d[..., 3:]], axis=-1)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass RandomBBoxTransform(Preprocessor):\n \"\"\"Randomly transform bounding boxes and the points inside them.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, K]\n - lasers.points_padding of shape [P]\n - labels.bboxes_3d of shape [L, 7]\n - labels.bboxes_3d_mask of shape [L]\n\n Modifies the following features:\n lasers.points_{xyz,feature,padding}, labels.bboxes_3d with the\n transformed bounding boxes and points.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'max_rotation', None,\n 'The rotation amount will be randomly picked from '\n '[-max_rotation, max_rotation).')\n # At the moment we don't use this because it can cause boxes to collide with\n # each other. We need to compute box intersections when deciding whether to\n # apply the translation jitter. Theoretically we should also do this for\n # rotation.\n p.Define('noise_std', [0.0, 0.0, 0.0],\n 'Standard deviation of translation noise per axis.')\n p.Define(\n 'max_scaling', None,\n 'When max_scaling is not none, delta parameters s_x, s_y, s_z are '\n 'drawn from [-max_scaling[i], max_scaling[i]] where i is in [0, 3].')\n p.Define(\n 'max_shearing', None,\n 'When max_shearing is not none, shearing parameters sh_x^y, sh_x^z, '\n 'sh_y^x, sh_y^z, sh_z^x, sh_z^y are drawn from '\n '[-max_shearing[i], max_shearing[i]], where i is in [0, 5].')\n p.Define(\n 'max_num_points_per_bbox', 16384,\n 'The maximum number of points that fall within a bounding box. 
'\n 'Bounding boxes with more points than this value will '\n 'have some points dropped.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if p.max_rotation is None:\n raise ValueError('max_rotation needs to be specified, instead of None.')\n if p.max_scaling is not None:\n if len(p.max_scaling) != 3:\n raise ValueError('max_scaling needs to be specified as either None or '\n 'a list of 3 floating point numbers, instead of {}.'\n ''.format(p.max_scaling))\n if p.max_shearing is not None:\n if len(p.max_shearing) != 6:\n raise ValueError('max_shearing needs to be specified as either None or '\n 'a list of 6 floating point numbers, instead of {}.'\n ''.format(p.max_shearing))\n\n def _Foreground(self, features, points_xyz, points_feature, real_bboxes_3d,\n points_in_bbox_mask, rotation, translate_pose, transform_fn):\n \"\"\"Extract and transform foreground points and features.\"\"\"\n out_bbox_xyz, out_bbox_feature, out_bbox_mask = self._ForLoopBuffers(\n features)\n\n # Only iterate over the actual number of boxes in the scene.\n actual_num_bboxes = tf.reduce_sum(\n tf.cast(features.labels.bboxes_3d_mask, tf.int32))\n\n ret = py_utils.ForLoop(\n body=transform_fn,\n start=0,\n limit=actual_num_bboxes,\n delta=1,\n loop_state=py_utils.NestedMap(\n points_xyz=points_xyz,\n points_feature=points_feature,\n bboxes_3d=real_bboxes_3d,\n points_in_bbox_mask=points_in_bbox_mask,\n rotation=rotation,\n translate_pose=translate_pose,\n out_bbox_points=out_bbox_xyz,\n out_bbox_feature=out_bbox_feature,\n out_bbox_mask=out_bbox_mask))\n\n # Gather all of the transformed points and features.\n out_bbox_xyz = tf.reshape(ret.out_bbox_points, [-1, 3])\n num_features = features.lasers.points_feature.shape[-1]\n out_bbox_feature = tf.reshape(ret.out_bbox_feature, [-1, num_features])\n out_bbox_mask = tf.cast(tf.reshape(ret.out_bbox_mask, [-1]), tf.bool)\n fg_xyz = tf.boolean_mask(out_bbox_xyz, out_bbox_mask)\n fg_feature = tf.boolean_mask(out_bbox_feature, out_bbox_mask)\n return fg_xyz, fg_feature\n\n def _Background(self, points_xyz, points_feature, points_in_bbox_mask):\n # If a point is in any bounding box, it is a foreground point.\n foreground_points_mask = tf.reduce_any(points_in_bbox_mask, axis=-1)\n # All others are background. 
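(E.g., for a hypothetical points_in_bbox_mask of shape [P, B],\n # tf.reduce_any over the last axis marks a point as foreground when it falls\n # inside at least one of the B boxes.) 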
We rotate all of the foreground points to\n # final_points_* and keep the background points unchanged\n background_points_mask = tf.math.logical_not(foreground_points_mask)\n background_points_xyz = tf.boolean_mask(points_xyz, background_points_mask)\n background_points_feature = tf.boolean_mask(points_feature,\n background_points_mask)\n return background_points_xyz, background_points_feature\n\n def _ForLoopBuffers(self, features):\n \"\"\"Create and return the buffers for the for loop.\"\"\"\n p = self.params\n bboxes_3d = features.labels.bboxes_3d\n\n # Compute the shapes and create the buffers for the For loop.\n max_num_bboxes = tf.shape(bboxes_3d)[0]\n per_box_shape = [max_num_bboxes, p.max_num_points_per_bbox, 3]\n out_bbox_points = inplace_ops.empty(\n per_box_shape, dtype=tf.float32, init=True)\n\n num_features = features.lasers.points_feature.shape[-1]\n bbox_feature_shape = [\n max_num_bboxes, p.max_num_points_per_bbox, num_features\n ]\n out_bbox_feature = inplace_ops.empty(\n bbox_feature_shape, dtype=tf.float32, init=True)\n\n per_box_mask_shape = [max_num_bboxes, p.max_num_points_per_bbox]\n out_bbox_mask = inplace_ops.empty(\n per_box_mask_shape, dtype=tf.float32, init=True)\n\n return out_bbox_points, out_bbox_feature, out_bbox_mask\n\n def TransformFeatures(self, features):\n p = self.params\n\n num_features = features.lasers.points_feature.shape[-1]\n\n def Transform(i, state):\n \"\"\"Transform the points in bounding box `i`.\"\"\"\n state.points_xyz = tf.reshape(state.points_xyz, [-1, 3])\n bbox_mask = tf.reshape(state.points_in_bbox_mask[:, i], [-1])\n\n # Fetch only the points in the bounding box.\n points_xyz_masked = tf.boolean_mask(state.points_xyz, bbox_mask)\n points_feature_masked = tf.boolean_mask(state.points_feature, bbox_mask)\n\n num_points = tf.shape(points_xyz_masked)[0]\n\n # TODO(vrv): Fold the following into a single transformation\n # matrix.\n #\n # Translate the box to the origin, then rotate the desired\n # rotation angle.\n translation_vec = state.bboxes_3d[i, 0:3]\n rotation_vec = [state.rotation[i], 0., 0.]\n pose = tf.concat([-translation_vec, rotation_vec], axis=0)\n points_xyz_adj = geometry.CoordinateTransform(points_xyz_masked, pose)\n if p.max_scaling is not None or p.max_shearing is not None:\n # Translate the points in the bounding box by moving dz/2 so that the\n # bottom of the bounding box is at Z = 0 when any of the two\n # (max_scaling or max_shearing) is not None\n translation_scale_or_shear = tf.stack(\n [0., 0., state.bboxes_3d[i, 5] / 2], axis=0)\n pose1 = tf.concat([translation_scale_or_shear, [0., 0., 0.]], axis=0)\n points_xyz_adj = geometry.CoordinateTransform(points_xyz_adj, pose1)\n else:\n translation_scale_or_shear = tf.stack([0., 0., 0.], axis=0)\n\n if p.max_scaling is not None:\n # Perform scaling to the point cloud\n # Scaling matrix\n # [[s_x+1 0 0]\n # [ 0 s_y+1 0]\n # [ 0 0 s_z+1]]\n sx = tf.random.uniform([],\n minval=-p.max_scaling[0],\n maxval=p.max_scaling[0],\n seed=p.random_seed)\n sy = tf.random.uniform([],\n minval=-p.max_scaling[1],\n maxval=p.max_scaling[1],\n seed=p.random_seed)\n sz = tf.random.uniform([],\n minval=-p.max_scaling[2],\n maxval=p.max_scaling[2],\n seed=p.random_seed)\n scaling_matrix = tf.stack(\n [[sx + 1., 0., 0.], [0., sy + 1., 0.], [0., 0., sz + 1.]], axis=0)\n\n points_xyz_adj = tf.einsum('ij,kj->ki', scaling_matrix, points_xyz_adj)\n\n if p.max_shearing is not None:\n # Perform shearing to the point cloud\n # Shearing matrix\n # [[1 sh_x^y sh_x^z]\n # [sh_y^x 1 sh_y^z]\n # 
[sh_z^x sh_z^y 1 ]]\n sxy = tf.random.uniform([],\n minval=-p.max_shearing[0],\n maxval=p.max_shearing[0],\n seed=p.random_seed)\n sxz = tf.random.uniform([],\n minval=-p.max_shearing[1],\n maxval=p.max_shearing[1],\n seed=p.random_seed)\n syx = tf.random.uniform([],\n minval=-p.max_shearing[2],\n maxval=p.max_shearing[2],\n seed=p.random_seed)\n syz = tf.random.uniform([],\n minval=-p.max_shearing[3],\n maxval=p.max_shearing[3],\n seed=p.random_seed)\n szx = tf.random.uniform([],\n minval=-p.max_shearing[4],\n maxval=p.max_shearing[4],\n seed=p.random_seed)\n szy = tf.random.uniform([],\n minval=-p.max_shearing[5],\n maxval=p.max_shearing[5],\n seed=p.random_seed)\n shearing_matrix = tf.stack(\n [[1., sxy, sxz], [syx, 1., syz], [szx, szy, 1.]], axis=0)\n points_xyz_adj = tf.einsum('ij,kj->ki', shearing_matrix, points_xyz_adj)\n\n # Translate the points back, adding noise if needed.\n translation_with_noise = (\n translation_vec - translation_scale_or_shear +\n state.translate_pose[i])\n pose2 = tf.concat([translation_with_noise, [0., 0., 0.]], axis=0)\n final_points_xyz = geometry.CoordinateTransform(points_xyz_adj, pose2)\n\n # final_points_xyz is an [M, 3] Tensor where M is the number of points in\n # the box.\n points_mask = tf.ones([num_points], dtype=tf.float32)\n\n final_points_xyz = py_utils.PadOrTrimTo(final_points_xyz,\n [p.max_num_points_per_bbox, 3])\n final_points_feature = py_utils.PadOrTrimTo(\n points_feature_masked, [p.max_num_points_per_bbox, num_features])\n points_mask = py_utils.PadOrTrimTo(points_mask,\n [p.max_num_points_per_bbox])\n state.out_bbox_points = inplace_ops.alias_inplace_update(\n state.out_bbox_points, [i], tf.expand_dims(final_points_xyz, 0))\n state.out_bbox_feature = inplace_ops.alias_inplace_update(\n state.out_bbox_feature, [i], tf.expand_dims(final_points_feature, 0))\n state.out_bbox_mask = inplace_ops.alias_inplace_update(\n state.out_bbox_mask, [i], tf.expand_dims(points_mask, 0))\n\n return state\n\n # Get the points and features that reside in boxes.\n if 'points_padding' in features.lasers:\n points_mask = 1 - features.lasers.points_padding\n points_xyz = tf.boolean_mask(features.lasers.points_xyz, points_mask)\n points_feature = tf.boolean_mask(features.lasers.points_feature,\n points_mask)\n else:\n points_xyz = features.lasers.points_xyz\n points_feature = features.lasers.points_feature\n\n # Fetch real bounding boxes and compute point mask.\n real_bboxes_3d = tf.boolean_mask(features.labels.bboxes_3d,\n features.labels.bboxes_3d_mask)\n points_in_bbox_mask = geometry.IsWithinBBox3D(points_xyz, real_bboxes_3d)\n\n # Choose a random rotation for every real box.\n num_boxes = tf.shape(real_bboxes_3d)[0]\n rotation = tf.random.uniform([num_boxes],\n minval=-p.max_rotation,\n maxval=p.max_rotation,\n seed=p.random_seed)\n\n base_seed = p.random_seed\n x_seed = base_seed\n y_seed = None if base_seed is None else base_seed + 1\n z_seed = None if base_seed is None else base_seed + 2\n random_translate_x = tf.random.normal([num_boxes],\n mean=0.0,\n stddev=p.noise_std[0],\n seed=x_seed)\n random_translate_y = tf.random.normal([num_boxes],\n mean=0.0,\n stddev=p.noise_std[1],\n seed=y_seed)\n random_translate_z = tf.random.normal([num_boxes],\n mean=0.0,\n stddev=p.noise_std[2],\n seed=z_seed)\n\n translate_pose = tf.stack(\n [random_translate_x, random_translate_y, random_translate_z], axis=1)\n\n fg_xyz, fg_feature = self._Foreground(features, points_xyz, points_feature,\n real_bboxes_3d, points_in_bbox_mask,\n rotation, translate_pose, 
Transform)\n\n    # Concatenate them with the background points and features.\n    bg_xyz, bg_feature = self._Background(points_xyz, points_feature,\n                                          points_in_bbox_mask)\n    all_points = tf.concat([bg_xyz, fg_xyz], axis=0)\n    all_features = tf.concat([bg_feature, fg_feature], axis=0)\n\n    # Shuffle the points/features randomly.\n    all_points, all_features = _ConsistentShuffle((all_points, all_features),\n                                                  p.random_seed)\n\n    # Padding should technically be unnecessary: the number of points before and\n    # after should be the same, but in practice we sometimes seem to drop a few\n    # points, and so we pad to make the shape fixed.\n    #\n    # TODO(vrv): Identify the source of this problem and then assert a shape\n    # matching check.\n    if 'points_padding' in features.lasers:\n      features.lasers.points_xyz = py_utils.PadOrTrimTo(\n          all_points, tf.shape(features.lasers.points_xyz))\n      features.lasers.points_feature = py_utils.PadOrTrimTo(\n          all_features, tf.shape(features.lasers.points_feature))\n      total_points = tf.shape(all_points)[0]\n      features.lasers.points_padding = 1.0 - py_utils.PadOrTrimTo(\n          tf.ones([total_points]), tf.shape(features.lasers.points_padding))\n    else:\n      features.lasers.points_xyz = all_points\n      features.lasers.points_feature = all_features\n\n    # Translate noise.\n    bboxes_xyz = real_bboxes_3d[..., :3]\n    bboxes_xyz += translate_pose[..., :3]\n\n    bboxes_dim = real_bboxes_3d[..., 3:6]\n    # Rotate bboxes by their corresponding rotation.\n    bboxes_rot = real_bboxes_3d[..., 6:]\n    bboxes_rot -= rotation[:, tf.newaxis]\n    features.labels.bboxes_3d = py_utils.PadOrTrimTo(\n        tf.concat([bboxes_xyz, bboxes_dim, bboxes_rot], axis=-1),\n        tf.shape(features.labels.bboxes_3d))\n    features.labels.bboxes_3d_mask = py_utils.PadOrTrimTo(\n        tf.ones(tf.shape(real_bboxes_3d)[0]),\n        tf.shape(features.labels.bboxes_3d_mask))\n    return features\n\n  def TransformShapes(self, shapes):\n    return shapes\n\n  def TransformDTypes(self, dtypes):\n    return dtypes\n\n\nclass GroundTruthAugmentor(Preprocessor):\n  \"\"\"Augment bounding box labels and points from a database.\n\n  This preprocessor expects features to contain the following keys:\n    lasers.points_xyz of shape [P, 3]\n\n    lasers.points_feature of shape [P, K]\n\n    lasers.points_padding of shape [P]\n\n    labels.bboxes_3d of shape [L, 7]\n\n    labels.bboxes_3d_mask of shape [L]\n\n    labels.labels of shape [L]\n\n  Modifies the above features so that additional objects from\n  a groundtruth database are added.\n  \"\"\"\n\n  @classmethod\n  def Params(cls):\n    p = super().Params()\n    p.Define(\n        'groundtruth_database', None,\n        'If not None, loads groundtruths from this database and adds '\n        'them to the current scene. Groundtruth database is expected '\n        'to be a TFRecord of KITTI or Waymo crops.')\n    p.Define(\n        'num_db_objects', None,\n        'Number of objects in the database. Because we use TFRecord '\n        'we cannot easily query the number of objects efficiently.')\n    p.Define('max_num_points_per_bbox', 2048,\n             'Maximum number of points in each bbox to augment with.')\n    p.Define(\n        'filter_min_points', 0,\n        'Minimum number of points each database object must have '\n        'to be included in an example.')\n    p.Define(\n        'filter_max_points', None,\n        'Maximum number of points each database object must have '\n        'to be included in an example.')\n    p.Define(\n        'difficulty_sampling_probability', None,\n        'Probability for sampling ground truth example whose difficulty '\n        'equals {0, 1, 2, 3, ...}. Example: [1.0, 1.0, 1.0, 1.0] for '\n        'uniform sampling 4 different difficulties. Default value is '\n        'None = uniform sampling for all difficulties.')\n    p.Define(\n        'class_sampling_probability', None,\n        'Probability for sampling ground truth example based on its class index.'\n        ' Example: For KITTI classes are [Background, Car, Van, Truck, '\n        'Pedestrian, Person_sitting, Cyclist, Tram, Misc, DontCare], using '\n        'probability vector [0., 1.0, 1.0, 0., 0., 0., 0., 0., 0., 0.], we '\n        'uniformly sample Car and Van. Default value is None: Uses '\n        'label_filter flag and does not sample based on class.')\n    p.Define('filter_min_difficulty', 0,\n             'Filter ground truth boxes whose difficulty is < this value.')\n    p.Define('max_augmented_bboxes', 15,\n             'Maximum number of augmented bounding boxes per scene.')\n    p.Define(\n        'label_filter', [],\n        'A list where if specified, only examples of these label integers will '\n        'be included in an example.')\n    p.Define(\n        'batch_mode', False, 'Bool value to control whether the whole '\n        'groundtruth database is loaded or partially loaded to save memory '\n        'usage. Setting to False loads the whole ground truth database into '\n        'memory. Otherwise, only a fraction of the data will be loaded into '\n        'the memory.')\n    return p\n\n  def _ReadDB(self, file_patterns):\n    \"\"\"Read the groundtruth database and return as a NestedMap of Tensors.\"\"\"\n    p = self.params\n\n    def Process(record):\n      \"\"\"Process a groundtruth record.\"\"\"\n      feature_map = {\n          'num_points': tf.io.FixedLenFeature((), tf.int64, 0),\n          'points': tf.io.VarLenFeature(dtype=tf.float32),\n          'points_feature': tf.io.VarLenFeature(dtype=tf.float32),\n          'bbox_3d': tf.io.VarLenFeature(dtype=tf.float32),\n          'label': tf.io.FixedLenFeature((), tf.int64, 0),\n          'difficulty': tf.io.FixedLenFeature((), tf.int64, 0),\n          'text': tf.io.VarLenFeature(dtype=tf.string),\n      }\n\n      example_data = tf.io.parse_single_example(record, feature_map)\n      num_points = example_data['num_points']\n\n      points = tf.reshape(_Dense(example_data['points']), [num_points, 3])\n      features = tf.reshape(\n          _Dense(example_data['points_feature']), [num_points, 1])\n      points_mask = tf.ones(num_points, dtype=tf.bool)\n\n      # TODO(vrv): Use random selection instead of first N points.\n      points = py_utils.PadOrTrimTo(points, [p.max_num_points_per_bbox, 3])\n      features = py_utils.PadOrTrimTo(features, [p.max_num_points_per_bbox, 1])\n      points_mask = py_utils.PadOrTrimTo(points_mask,\n                                         [p.max_num_points_per_bbox])\n\n      bboxes_3d = tf.reshape(_Dense(example_data['bbox_3d']), [7])\n      label = tf.cast(example_data['label'], tf.int32)\n      difficulty = tf.cast(example_data['difficulty'], tf.int32)\n      return (points, features, points_mask, bboxes_3d, label, difficulty)\n\n    if p.batch_mode:\n      # Prepare dataset for ground truth bounding boxes. 
Randomly shuffle the\n # file patterns.\n file_count = len(tf.io.gfile.glob(file_patterns))\n dataset = tf.stateless_list_files(file_patterns)\n dataset = dataset.apply(tf.stateless_cache_dataset())\n dataset = dataset.apply(\n tf.stateless_shuffle_dataset(\n buffer_size=file_count, reshuffle_each_iteration=True))\n dataset = dataset.interleave(\n tf.data.TFRecordDataset, cycle_length=10, num_parallel_calls=10)\n dataset = dataset.repeat()\n # Only prefetch a few objects from the database to reduce memory\n # consumption.\n dataset = dataset.map(Process, num_parallel_calls=10)\n # We need more bboxes than max_augmented_bboxes in a batch, because some\n # of the boxes are filtered out.\n dataset = dataset.batch(p.max_augmented_bboxes * 10)\n dataset = dataset.apply(tf.stateless_cache_dataset()).prefetch(\n p.max_augmented_bboxes * 30)\n else:\n # Prepare dataset for ground truth bounding boxes.\n dataset = tf.stateless_list_files(file_patterns)\n dataset = dataset.interleave(\n tf.data.TFRecordDataset, cycle_length=10, num_parallel_calls=10)\n # Read the entire dataset into memory.\n dataset = dataset.take(p.num_db_objects)\n dataset = dataset.map(Process, num_parallel_calls=10)\n # We batch the output of the dataset into a very large Tensor, then cache\n # it in memory.\n dataset = dataset.batch(p.num_db_objects)\n dataset = dataset.apply(tf.stateless_cache_dataset()).repeat()\n\n iterator = dataset.make_one_shot_iterator()\n input_batch = iterator.get_next()\n\n (db_points_xyz, db_points_feature, db_points_mask, db_bboxes, db_labels,\n db_difficulties) = input_batch\n return py_utils.NestedMap(\n points_xyz=db_points_xyz,\n points_feature=db_points_feature,\n points_mask=db_points_mask,\n bboxes_3d=db_bboxes,\n labels=db_labels,\n difficulties=db_difficulties)\n\n def _CreateExampleFilter(self, db):\n \"\"\"Construct db example filter.\n\n Args:\n db: NestedMap of the following Tensors: points_mask - [N, P] - The points\n mask for every object in the database, where N is the number of objects\n and P is the maximum number of points per object. labels - [N] - int32\n Label for each object in the database. 
difficulties - [N] - int32\n        Difficulty for each label in the database.\n\n    Returns:\n      A [N] boolean Tensor for each object in the database, True if\n      that corresponding object passes the filter.\n    \"\"\"\n    p = self.params\n    db_points_mask = db.points_mask\n    db_label = db.labels\n    db_difficulty = db.difficulties\n\n    num_objects_in_database = tf.shape(db_points_mask)[0]\n\n    # Filter objects by the number of points they contain.\n    points_per_object = tf.reduce_sum(tf.cast(db_points_mask, tf.int32), axis=1)\n    example_filter = points_per_object >= p.filter_min_points\n    if p.filter_max_points:\n      example_filter = tf.math.logical_and(\n          example_filter, points_per_object <= p.filter_max_points)\n\n    if p.difficulty_sampling_probability is not None:\n      # Sample db based on difficulty of each example.\n      sampling_prob = p.difficulty_sampling_probability\n      db_difficulty_probability = tf.zeros_like(db_difficulty, dtype=tf.float32)\n      for difficulty_idx, difficulty_prob in enumerate(sampling_prob):\n        db_difficulty_probability += (\n            tf.cast(tf.equal(db_difficulty, difficulty_idx), tf.float32) *\n            difficulty_prob)\n\n      sampled_filter = tf.random.uniform(\n          tf.shape(example_filter),\n          minval=0,\n          maxval=1,\n          dtype=tf.float32,\n          seed=p.random_seed)\n      sampled_filter = sampled_filter < db_difficulty_probability\n      example_filter &= sampled_filter\n    else:\n      # Filter out db examples below the minimum difficulty.\n      example_filter = tf.math.logical_and(\n          example_filter, db_difficulty >= p.filter_min_difficulty)\n\n    example_filter = tf.reshape(example_filter, [num_objects_in_database])\n    db_label = tf.reshape(db_label, [num_objects_in_database])\n    if p.class_sampling_probability is not None:\n      # Sample example based on its class probability.\n      sampling_prob = p.class_sampling_probability\n      db_class_probability = tf.zeros_like(db_label, dtype=tf.float32)\n\n      for class_idx, class_prob in enumerate(sampling_prob):\n        db_class_probability += (\n            tf.cast(tf.equal(db_label, class_idx), tf.float32) * class_prob)\n\n      sampled_filter = tf.random.uniform(\n          tf.shape(example_filter),\n          minval=0,\n          maxval=1,\n          dtype=tf.float32,\n          seed=p.random_seed)\n      sampled_filter = sampled_filter < db_class_probability\n      example_filter &= sampled_filter\n    elif p.label_filter:\n      # Filter based on labels: keep only examples whose label is in\n      # p.label_filter.\n      valid_labels = tf.constant(p.label_filter)\n      label_mask = tf.reduce_any(\n          tf.equal(db_label[..., tf.newaxis], valid_labels), axis=1)\n      example_filter = tf.math.logical_and(example_filter, label_mask)\n    return example_filter\n\n  # TODO(vrv): Create an overlap filter that also ensures that boxes don't\n  # overlap with groundtruth points, so that the scenes are more plausible.\n  def _FilterIndices(self, gt_bboxes_3d, db_bboxes, db_idx):\n    \"\"\"Identify database boxes that don't overlap with other boxes.\"\"\"\n    # We accomplish overlap filtering by first computing the pairwise 3D IoU of\n    # all boxes (concatenated) as a way of computing pairwise box overlaps.\n    num_gt_bboxes = tf.shape(gt_bboxes_3d)[0]\n    filtered_bboxes = tf.gather(db_bboxes, db_idx)\n    all_bboxes = tf.concat([gt_bboxes_3d, filtered_bboxes], axis=0)\n    pairwise_overlap = ops.pairwise_iou3d(all_bboxes, all_bboxes)\n\n    # We now have an M x M matrix with 1s on the diagonal and non-zero entries\n    # whenever a box collides with another.\n    #\n    # To increase the number of boxes selected, we filter the upper triangular\n    # entries so that the boxes are chosen greedily: boxes with smaller indices\n    # will be selected before later boxes, because earlier boxes will not appear\n    # 
to collide with later boxes, but later boxes may collide with earlier\n # ones.\n pairwise_overlap = tf.linalg.band_part(pairwise_overlap, -1, 0)\n\n # We compute the sum of the IoU overlaps for all database boxes.\n db_overlap_sums = tf.reduce_sum(pairwise_overlap[num_gt_bboxes:], axis=1)\n\n # Those boxes that don't overlap with any other boxes will only have\n # a 1.0 IoU with itself.\n non_overlapping_boxes = tf.reshape(db_overlap_sums <= 1., [-1])\n\n # Filter to select only those object ids that pass this filter.\n db_idx = tf.boolean_mask(db_idx, non_overlapping_boxes)\n return db_idx\n\n def TransformFeatures(self, features):\n p = self.params\n\n tf.logging.info('Loading groundtruth database at %s' %\n (p.groundtruth_database))\n db = p.groundtruth_database.Instantiate().BuildDataSource(self._ReadDB).data\n\n original_features_shape = tf.shape(features.lasers.points_feature)\n\n # Compute the number of bboxes to augment.\n num_bboxes_in_scene = tf.reduce_sum(\n tf.cast(features.labels.bboxes_3d_mask, tf.int32))\n max_bboxes = tf.shape(features.labels.bboxes_3d_mask)[0]\n num_augmented_bboxes = tf.minimum(max_bboxes - num_bboxes_in_scene,\n p.max_augmented_bboxes)\n\n # Compute an object index over all objects in the database.\n num_objects_in_database = tf.shape(db.points_xyz)[0]\n db_idx = tf.range(num_objects_in_database)\n\n # Find those indices whose examples pass the filters, and select only those\n # indices.\n example_filter = self._CreateExampleFilter(db)\n db_idx = tf.boolean_mask(db_idx, example_filter)\n\n # At this point, we might still have a large number of object candidates,\n # from which we only need a sample.\n # To reduce the amount of computation, we randomly subsample to slightly\n # more than we want to augment.\n db_idx = tf.random.shuffle(\n db_idx, seed=p.random_seed)[0:num_augmented_bboxes * 5]\n\n # After filtering, further filter out the db boxes that would occlude with\n # other boxes (including other database boxes).\n #\n # Gather the filtered ground truth bounding boxes according to the mask, so\n # we can compute overlaps below.\n gt_bboxes_3d_mask = tf.cast(features.labels.bboxes_3d_mask, tf.bool)\n gt_bboxes_3d = tf.boolean_mask(features.labels.bboxes_3d, gt_bboxes_3d_mask)\n gt_bboxes_3d = py_utils.HasShape(gt_bboxes_3d, [num_bboxes_in_scene, 7])\n db_idx = self._FilterIndices(gt_bboxes_3d, db.bboxes_3d, db_idx)\n\n # From the filtered object ids, select only as many boxes as we need.\n shuffled_idx = db_idx[0:num_augmented_bboxes]\n num_augmented_bboxes = tf.shape(shuffled_idx)[0]\n\n # Gather based off the indices.\n sampled_points_xyz = tf.gather(db.points_xyz, shuffled_idx)\n sampled_points_feature = tf.gather(db.points_feature, shuffled_idx)\n sampled_mask = tf.reshape(\n tf.gather(db.points_mask, shuffled_idx),\n [num_augmented_bboxes, p.max_num_points_per_bbox])\n sampled_bboxes = tf.gather(db.bboxes_3d, shuffled_idx)\n sampled_labels = tf.gather(db.labels, shuffled_idx)\n\n # Mask points/features.\n sampled_points_xyz = tf.boolean_mask(sampled_points_xyz, sampled_mask)\n sampled_points_feature = tf.boolean_mask(sampled_points_feature,\n sampled_mask)\n\n # Flatten before concatenation with ground truths.\n sampled_points_xyz = tf.reshape(sampled_points_xyz, [-1, 3])\n sampled_points_feature = tf.reshape(sampled_points_feature,\n [-1, original_features_shape[-1]])\n sampled_bboxes = tf.reshape(sampled_bboxes, [-1, 7])\n\n # Concatenate the samples with the ground truths.\n if 'points_padding' in features.lasers:\n points_mask = tf.cast(1. 
- features.lasers.points_padding, tf.bool)\n      # Densify the original points.\n      dense_points_xyz = tf.boolean_mask(features.lasers.points_xyz,\n                                         points_mask)\n      dense_points_feature = tf.boolean_mask(features.lasers.points_feature,\n                                             points_mask)\n\n      # Concatenate the dense original points with our new sampled points.\n      points_xyz = tf.concat([dense_points_xyz, sampled_points_xyz], axis=0)\n      points_feature = tf.concat([dense_points_feature, sampled_points_feature],\n                                 axis=0)\n      original_points_shape = tf.shape(features.lasers.points_xyz)\n      features.lasers.points_xyz = py_utils.PadOrTrimTo(points_xyz,\n                                                        original_points_shape)\n      features.lasers.points_feature = py_utils.PadOrTrimTo(\n          points_feature, original_features_shape)\n      # Compute the modified mask / padding.\n      final_points_mask = py_utils.PadOrTrimTo(\n          tf.ones(tf.shape(points_xyz)[0]),\n          tf.shape(features.lasers.points_padding))\n      features.lasers.points_padding = 1. - final_points_mask\n    else:\n      points_xyz = tf.concat([features.lasers.points_xyz, sampled_points_xyz],\n                             axis=0)\n      points_feature = tf.concat(\n          [features.lasers.points_feature, sampled_points_feature], axis=0)\n      features.lasers.points_xyz = points_xyz\n      features.lasers.points_feature = points_feature\n\n    # Reconstruct a new, dense, bboxes_3d vector that includes the filtered\n    # groundtruth bounding boxes followed by the database augmented boxes.\n    bboxes_3d = tf.concat([gt_bboxes_3d, sampled_bboxes], axis=0)\n    bboxes_3d = py_utils.PadOrTrimTo(bboxes_3d, [max_bboxes, 7])\n    features.labels.bboxes_3d = bboxes_3d\n    bboxes_3d_mask = tf.ones(\n        num_bboxes_in_scene + num_augmented_bboxes, dtype=tf.float32)\n    features.labels.bboxes_3d_mask = py_utils.PadOrTrimTo(\n        bboxes_3d_mask, [max_bboxes])\n\n    gt_labels = tf.boolean_mask(features.labels.labels, gt_bboxes_3d_mask)\n    gt_labels = py_utils.HasShape(gt_labels, [num_bboxes_in_scene])\n\n    labels = tf.concat([gt_labels, sampled_labels], axis=0)\n    features.labels.labels = py_utils.PadOrTrimTo(labels, [max_bboxes])\n\n    return features\n\n  def TransformShapes(self, shapes):\n    return shapes\n\n  def TransformDTypes(self, dtypes):\n    return dtypes\n\n\nclass FrustumDropout(Preprocessor):\n  \"\"\"Randomly drops out points in a frustum.\n\n  All points are first converted to spherical coordinates, and then a point\n  is randomly selected. All points in the frustum around that point within\n  a given phi, theta angle width and distance to the origin greater than\n  a given value are dropped with probability = 1 - keep_prob.\n\n  Here, we can specify whether the dropped frustum is the union or intersection\n  of the phi and theta angle filters.\n\n  This preprocessor expects features to contain the following keys:\n  - lasers.points_xyz of shape [P, 3]\n  - lasers.points_feature of shape [P, K]\n\n  Optionally points_padding of shape [P] corresponding to the padding.\n  If points_padding is None, then all points are considered valid.\n\n  Modifies the following features:\n    lasers.points_xyz, lasers.points_feature, lasers.points_padding with points\n    randomly dropped out.\n  \"\"\"\n\n  @classmethod\n  def Params(cls):\n    p = super().Params()\n    p.Define('theta_width', 0.03, 'Theta angle width for dropping points.')\n    p.Define('phi_width', 0.0, 'Phi angle width for dropping points.')\n    p.Define(\n        'distance', 0.0, 'Drop points that have larger distance to the '\n        'origin than the value given here.')\n    p.Define(\n        'keep_prob', 0.0, 'keep_prob: 1. 
= drop no points in the Frustum,'\n '0 = drop all points, between 0 and 1 = down sample the points.')\n p.Define(\n 'drop_type', 'union', 'Drop either the union or intersection of '\n 'phi width and theta width.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if p.phi_width < 0:\n raise ValueError('phi_width must be >= 0, phi_width={}'.format(\n p.phi_width))\n if p.theta_width < 0:\n raise ValueError('theta_width must be >= 0, theta_width={}'.format(\n p.theta_width))\n if p.distance < 0:\n raise ValueError('distance must be >= 0, distance={}'.format(p.distance))\n if p.keep_prob < 0 or p.keep_prob > 1:\n raise ValueError('keep_prob must be >= 0 and <=1, keep_prob={}'.format(\n p.keep_prob))\n if p.drop_type not in ['union', 'intersection']:\n raise ValueError('drop_type must be union or intersection ,'\n 'drop_type={}'.format(p.drop_type))\n\n def TransformFeatures(self, features):\n p = self.params\n points_xyz = features.lasers.points_xyz\n points_feature = features.lasers.points_feature\n if 'points_padding' in features.lasers:\n points_padding = features.lasers.points_padding\n else:\n points_padding = None\n\n if points_padding is not None:\n points_mask = tf.cast(1 - points_padding, tf.bool)\n num_total_points = py_utils.GetShape(points_mask)[0]\n real_points_idx = tf.boolean_mask(\n tf.range(0, num_total_points, dtype=tf.int32), points_mask)\n num_points = py_utils.GetShape(real_points_idx)[0]\n else:\n points_mask = tf.ones_like(points_xyz[:, 0], dtype=tf.bool)\n num_total_points = py_utils.GetShape(points_mask)[0]\n num_points = py_utils.GetShape(points_xyz)[0]\n\n r, theta, phi = tf.unstack(\n geometry.SphericalCoordinatesTransform(points_xyz), axis=-1)\n\n def _PickRandomPoint():\n point_idx = tf.random.uniform((),\n minval=0,\n maxval=num_points,\n dtype=tf.int32)\n if points_padding is not None:\n point_idx = real_points_idx[point_idx]\n return point_idx\n\n # Pick a point at random and drop all points that are near that point in the\n # frustum for distance larger than r; repeat this for both theta and phi.\n if p.theta_width > 0:\n theta_half_width = p.theta_width / 2.\n point_idx = _PickRandomPoint()\n # Points within theta width and further than distance will be dropped.\n theta_drop_filter = ((theta < (theta[point_idx] + theta_half_width)) &\n (theta > (theta[point_idx] - theta_half_width)) &\n (r > p.distance))\n else:\n theta_drop_filter = tf.zeros_like(points_mask, dtype=tf.bool)\n\n if p.phi_width > 0:\n phi_half_width = p.phi_width / 2.\n point_idx = _PickRandomPoint()\n # Points within phi width and further than distance will be dropped.\n phi_drop_filter = ((phi < (phi[point_idx] + phi_half_width)) &\n (phi >\n (phi[point_idx] - phi_half_width)) & (r > p.distance))\n else:\n phi_drop_filter = tf.zeros_like(points_mask, dtype=tf.bool)\n\n # Create drop_filter by combining filters. This contains a filter for the\n # points to be removed. 
One can use the intersection method to limit the\n    # dropped points to lie within both phi and theta ranges.\n    if p.drop_type == 'union':\n      drop_filter = theta_drop_filter | phi_drop_filter\n    elif p.drop_type == 'intersection':\n      drop_filter = theta_drop_filter & phi_drop_filter\n\n    if p.keep_prob == 0:\n      # Drop all points in drop_filter.\n      down_sampling_filter = drop_filter\n    else:\n      # Randomly drop points in drop_filter based on keep_prob.\n      sampling_drop_filter = tf.random.uniform([num_total_points],\n                                               minval=0,\n                                               maxval=1,\n                                               dtype=tf.float32)\n      # Points greater than the threshold (keep_prob) will be dropped.\n      sampling_drop_filter = sampling_drop_filter > p.keep_prob\n\n      # Instead of dropping all points in the frustum, we drop out points\n      # that are in the selected frustum (drop_filter).\n      down_sampling_filter = drop_filter & sampling_drop_filter\n\n    points_mask &= ~down_sampling_filter\n\n    if points_padding is not None:\n      features.lasers.points_padding = 1 - tf.cast(points_mask, tf.float32)\n    else:\n      features.lasers.points_xyz = tf.boolean_mask(points_xyz, points_mask)\n      features.lasers.points_feature = tf.boolean_mask(points_feature,\n                                                       points_mask)\n\n    return features\n\n  def TransformShapes(self, shapes):\n    return shapes\n\n  def TransformDTypes(self, dtypes):\n    return dtypes\n\n\nclass RepeatPreprocessor(Preprocessor):\n  \"\"\"Repeat a preprocessor multiple times.\n\n  This preprocessor takes a preprocessor as a subprocessor and applies the\n  subprocessor to features multiple times (repeat_count).\n  \"\"\"\n\n  @classmethod\n  def Params(cls):\n    p = super().Params()\n    p.Define('repeat_count', 1, 'Number of times the subprocessor is applied to'\n             ' features.')\n    p.Define('subprocessor', None, 'One of the input preprocessors.')\n\n    return p\n\n  def __init__(self, params):\n    super().__init__(params)\n    p = self.params\n    if p.subprocessor is None:\n      raise ValueError('No subprocessor was specified for RepeatPreprocessor.')\n    if p.repeat_count < 0 or not isinstance(p.repeat_count, int):\n      raise ValueError(\n          'repeat_count must be >= 0 and int, repeat_count={}'.format(\n              p.repeat_count))\n\n    self.CreateChild('subprocessor', p.subprocessor)\n\n  def TransformFeatures(self, features):\n    p = self.params\n    for _ in range(p.repeat_count):\n      features = self.subprocessor.FPropDefaultTheta(features)\n\n    return features\n\n  def TransformShapes(self, shapes):\n    p = self.params\n    for _ in range(p.repeat_count):\n      shapes = self.subprocessor.TransformShapes(shapes)\n\n    return shapes\n\n  def TransformDTypes(self, dtypes):\n    p = self.params\n    for _ in range(p.repeat_count):\n      dtypes = self.subprocessor.TransformDTypes(dtypes)\n\n    return dtypes\n\n\nclass RandomApplyPreprocessor(Preprocessor):\n  \"\"\"Randomly apply a preprocessor with a certain probability.\n\n  This preprocessor takes a preprocessor as a subprocessor and applies the\n  subprocessor to features with a certain probability.\n  \"\"\"\n\n  @classmethod\n  def Params(cls):\n    p = super().Params()\n    p.Define('prob', 1.0, 'The probability of the subprocessor being executed.')\n    p.Define('subprocessor', None, 'Params for an input preprocessor.')\n    return p\n\n  def __init__(self, params):\n    super().__init__(params)\n    p = self.params\n    if p.subprocessor is None:\n      raise ValueError(\n          'No subprocessor was specified for RandomApplyPreprocessor.')\n    if p.prob < 0 or p.prob > 1 or not isinstance(p.prob, float):\n      raise ValueError(\n          'prob must be >= 0 and <= 1 and float type, prob={}'.format(p.prob))\n\n    self.CreateChild('subprocessor', p.subprocessor)\n\n  def 
TransformFeatures(self, features):\n p = self.params\n choice = tf.random.uniform(\n (), minval=0.0, maxval=1.0, seed=p.random_seed) <= p.prob\n # Features is passed downstream and may be modified, we make deep copies\n # here to use with tf.cond to avoid having tf.cond access updated\n # versions. Note that we need one copy for each branch in case the branches\n # further modify features.\n features_0, features_1 = features.DeepCopy(), features.DeepCopy()\n features = tf.cond(choice,\n lambda: self.subprocessor.TransformFeatures(features_0),\n lambda: features_1)\n return features\n\n def TransformShapes(self, shapes):\n shapes_transformed = self.subprocessor.TransformShapes(shapes)\n\n if not shapes.IsCompatible(shapes_transformed):\n raise ValueError(\n 'NestedMap structures are different between shapes and transformed'\n 'shapes. Original shapes: {}. Transformed shapes: {}'.format(\n shapes, shapes_transformed))\n\n def IsCompatibleWith(a, b):\n return a.is_compatible_with(b)\n\n if not all(\n py_utils.Flatten(\n py_utils.Transform(IsCompatibleWith, shapes, shapes_transformed))):\n raise ValueError(\n 'Shapes after transformation - {} are different from original '\n 'shapes - {}.'.format(shapes_transformed, shapes))\n\n return shapes\n\n def TransformDTypes(self, dtypes):\n transformed_dtypes = self.subprocessor.TransformDTypes(dtypes)\n if transformed_dtypes != dtypes:\n raise ValueError(\n 'DTypes after transformation of preprocessor - {} should be '\n 'the same as {}, but get {}.'.format(self.params.subprocessor, dtypes,\n transformed_dtypes))\n return dtypes\n\n\nclass ConstantPreprocessor(Preprocessor):\n \"\"\"Preprocessor that produces specified constant values in a nested output.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'constants', py_utils.NestedMap(),\n 'Map of key names to numpy arrays of constant values to use. '\n 'Must be a NestedMap or dict convertible to NestedMap.')\n return p\n\n def TransformFeatures(self, features):\n constants = py_utils.NestedMap(self.params.constants)\n features.update(constants.Transform(tf.constant))\n return features\n\n def TransformShapes(self, shapes):\n constants = py_utils.NestedMap(self.params.constants)\n shapes.update(\n constants.Transform(lambda x: tf.TensorShape(np.array(x).shape)))\n return shapes\n\n def TransformDTypes(self, dtypes):\n constants = py_utils.NestedMap(self.params.constants)\n dtypes.update(constants.Transform(lambda x: tf.as_dtype(np.array(x).dtype)))\n return dtypes\n\n\nclass IdentityPreprocessor(Preprocessor):\n \"\"\"Preprocessor that passes all inputs through.\n\n This may be useful for situations where one wants a 'no-op' preprocessor, such\n as being able to randomly choose to do nothing among a set of preprocessor\n choices.\n \"\"\"\n\n def TransformFeatures(self, features):\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass RandomChoicePreprocessor(Preprocessor):\n \"\"\"Randomly applies a preprocessor with specified weights.\n\n The input at features[p.weight_tensor_key] must be a floating point vector\n Tensor whose length matches the number of subprocessors to select among. 
The\n values in that Tensor are interpreted as relative weights.\n\n For example, if p.subprocessors = [preprocessor1, preprocessor2] and the\n weights are [1., 2.], then preprocessor1 will be applied with probability 1/3,\n and preprocessor2 will be applied with probability 2/3.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'subprocessors', [],\n 'Params for preprocessors. Each value should be a tuple of '\n '(Preprocessor.Params(), BaseSchedule.Params()), where the schedule '\n 'defines the weights to use over time.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if not p.subprocessors:\n raise ValueError('No subprocessors were specified.')\n\n subprocessors, schedules = zip(*p.subprocessors)\n\n def _FilterNonSchedules(v):\n return not issubclass(getattr(v, 'cls', False), schedule.BaseSchedule)\n\n invalid_values = [_FilterNonSchedules(s) for s in schedules]\n if any(invalid_values):\n raise TypeError('Not all schedule values were schedules: '\n f'{invalid_values}')\n\n self.CreateChildren('subprocessors', list(subprocessors))\n self.CreateChildren('schedules', list(schedules))\n\n def TransformFeatures(self, features):\n p = self.params\n\n choice_list = []\n weight_list = []\n\n # Pass a unique copy of the input to each branch, in case the\n # subprocessor destructively modifies the features in unexpected ways.\n for subp, sched in zip(self.subprocessors, self.schedules):\n choice_list.append(\n lambda subp=subp: subp.TransformFeatures(features.DeepCopy()))\n weight_list.append(sched.Value())\n\n weight_tensor = tf.stack(weight_list)\n chosen_bin = tf.random.categorical(\n tf.math.log(weight_tensor[tf.newaxis]),\n 1,\n seed=p.random_seed,\n dtype=tf.int32)[0, 0]\n features = tf.switch_case(chosen_bin, branch_fns=choice_list)\n return features\n\n def TransformShapes(self, shapes):\n transformed_shapes = [\n subp.TransformShapes(shapes.DeepCopy()) for subp in self.subprocessors\n ]\n if not all(transformed_shapes[0] == curr for curr in transformed_shapes):\n raise ValueError('Shapes after transformations were not identical: '\n f'{transformed_shapes}')\n return transformed_shapes[0]\n\n def TransformDTypes(self, dtypes):\n transformed_dtypes = [\n subp.TransformDTypes(dtypes.DeepCopy()) for subp in self.subprocessors\n ]\n if not all(transformed_dtypes[0] == curr for curr in transformed_dtypes):\n raise ValueError('DTypes after transformations were not identical: '\n f'{transformed_dtypes}')\n return transformed_dtypes[0]\n\n\nclass SparseSampler(Preprocessor):\n \"\"\"Fused SparseCenterSelector and SparseCellGatherFeatures.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, F]\n\n Adds the following features:\n anchor_centers - [num_centers, 3] - Floating point output containing the\n center (x, y, z) locations for tiling anchor boxes.\n\n cell_center_xyz - [num_centers, 3] - Floating point output containing\n the center (x, y, z) locations for each cell to featurize.\n\n cell_center_padding - [num_centers] - 0/1 padding for each center.\n\n cell_points_xyz - [num_centers, num_neighbors, 3] - Floating point\n output containing the (x, y, z) locations for each point for a given\n center.\n\n cell_feature - [num_centers, num_neighbors, F] - Floating point output\n containing the features for each point for a given center.\n\n cell_points_padding - [num_centers, num_neighbors] - 0/1 padding\n for the points in each 
cell.\n  \"\"\"\n\n  @classmethod\n  def Params(cls):\n    p = super().Params()\n    p.Define('center_selector', 'farthest', 'Method to sample centers. '\n             'Valid options - uniform, farthest.')\n    p.Define('neighbor_sampler', 'uniform', 'Method to select neighbors. '\n             'Valid options - uniform, closest.')\n    p.Define('num_centers', 16, 'The number of centers to sample.')\n    p.Define(\n        'features_preparation_layers', [],\n        'A list of Params for layers to run on the features before '\n        'performing farthest point sampling. For example, one may wish to '\n        'drop points outside the frustum for KITTI before selecting centers. '\n        'Note that these layers will not mutate the original features, '\n        'instead, a copy will be made.')\n    p.Define(\n        'keep_z_range', (-np.inf, np.inf),\n        'Only points that have z coordinates within this range are kept. '\n        'Approximate ground-removal can be performed by specifying a '\n        'lower-bound on the z-range.')\n    p.Define('num_neighbors', 64, 'Sample this many points within the '\n             'neighborhood.')\n    p.Define(\n        'max_distance', 1.0, 'Points with L2 distances from a center '\n        'larger than this threshold are not considered to be in the '\n        'neighborhood.')\n    return p\n\n  def __init__(self, params):\n    super().__init__(params)\n    p = self.params\n    if p.features_preparation_layers:\n      self.CreateChildren('features_preparation_layers',\n                          p.features_preparation_layers)\n\n  def TransformFeatures(self, features):\n    p = self.params\n    n, m = p.num_centers, p.num_neighbors\n\n    prepared_features = features.DeepCopy()\n    if p.features_preparation_layers:\n      for prep_layer in self.features_preparation_layers:\n        prepared_features = prep_layer.FPropDefaultTheta(prepared_features)\n\n    points_data = prepared_features.lasers\n    points = py_utils.HasShape(points_data.points_xyz, [-1, 3])\n\n    if 'points_padding' in points_data:\n      points_mask = 1 - points_data.points_padding\n      points = tf.boolean_mask(points, points_mask)\n\n    # If num_points < num_centers, pad points to have at least num_centers\n    # points.\n    num_points = tf.shape(points)[0]\n    required_num_points = tf.maximum(num_points, p.num_centers)\n    zeros = tf.zeros([required_num_points - num_points, 3])\n    points = tf.concat([points, zeros], axis=0)\n\n    num_seeded_points = points_data.get('num_seeded_points', 0)\n\n    neighbor_algorithm = 'auto'\n    # Based on benchmarks, the hash solution works better when the number of\n    # centers is >= 16 and there are at least 10k points per point cloud.\n    if p.num_centers >= 16:\n      neighbor_algorithm = 'hash'\n\n    centers, center_paddings, indices, indices_paddings = ops.sample_points(\n        points=tf.expand_dims(points, 0),\n        points_padding=tf.zeros([1, required_num_points], tf.float32),\n        num_seeded_points=num_seeded_points,\n        center_selector=p.center_selector,\n        neighbor_sampler=p.neighbor_sampler,\n        neighbor_algorithm=neighbor_algorithm,\n        num_centers=p.num_centers,\n        center_z_min=p.keep_z_range[0],\n        center_z_max=p.keep_z_range[1],\n        num_neighbors=p.num_neighbors,\n        max_distance=p.max_distance,\n        random_seed=p.random_seed if p.random_seed else -1)\n    centers = py_utils.HasShape(centers, [1, n])[0, :]\n    center_paddings = py_utils.HasShape(center_paddings, [1, n])[0, :]\n    indices = py_utils.HasShape(indices, [1, n, m])[0, :]\n    indices_paddings = py_utils.HasShape(indices_paddings, [1, n, m])[0, :]\n    features.cell_center_padding = center_paddings\n    features.cell_center_xyz = py_utils.HasShape(\n        tf.gather(points, centers), [n, 3])\n    features.anchor_centers = features.cell_center_xyz\n    features.cell_points_xyz = 
py_utils.HasShape(\n tf.gather(points, indices), [n, m, 3])\n features.cell_feature = tf.gather(points_data.points_feature, indices)\n features.cell_points_padding = indices_paddings\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n n, m, f = p.num_centers, p.num_neighbors, shapes.lasers.points_feature[-1]\n shapes.anchor_centers = tf.TensorShape([n, 3])\n shapes.cell_center_padding = tf.TensorShape([n])\n shapes.cell_center_xyz = tf.TensorShape([n, 3])\n shapes.cell_points_xyz = tf.TensorShape([n, m, 3])\n shapes.cell_feature = tf.TensorShape([n, m, f])\n shapes.cell_points_padding = tf.TensorShape([n, m])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.anchor_centers = tf.float32\n dtypes.cell_center_padding = tf.float32\n dtypes.cell_center_xyz = tf.float32\n dtypes.cell_points_xyz = tf.float32\n dtypes.cell_feature = tf.float32\n dtypes.cell_points_padding = tf.float32\n return dtypes\n",
"# Lint as: python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common layers.\"\"\"\n\nimport copy\nimport math\nimport numbers\nimport lingvo.compat as tf\nfrom lingvo.core import activations\nfrom lingvo.core import base_layer\nfrom lingvo.core import bn_layers\nfrom lingvo.core import builder_layers\nfrom lingvo.core import computation_cost\nfrom lingvo.core import conv_layers_with_time_padding\nfrom lingvo.core import pruning_utils\nfrom lingvo.core import py_utils\nfrom lingvo.core import quant_utils\nfrom lingvo.core import recurrent\nfrom lingvo.core import schedule\nfrom lingvo.core import summary_utils\nfrom lingvo.core import symbolic\nfrom lingvo.core import tshape\nimport numpy as np\nimport sympy\n\n# pylint:disable=g-direct-tensorflow-import\nfrom tensorflow.python.ops import inplace_ops\n# pylint:enable=g-direct-tensorflow-import\n\n\nclass DeconvLayer(base_layer.BaseLayer):\n \"\"\"Deconv (transposed conv2d) layer.\n\n DeconvLayer is different from ConvTransposeLayer in that\n DeconvLayer does not support padding and biasing. Hence,\n it's simpler and more basic than ConvTransposeLayer.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'filter_shape', (0, 0, 0, 0),\n 'Filter shape. Must be a sequence of length 4. Elements are in'\n ' the order of height, width, out_channel, in_channel.')\n p.Define(\n 'filter_stride', (0, 0),\n 'Filter stride to use. Must be a pair of ints. The first int'\n ' specifies the stride on the height dimension. The second int'\n ' specifies the stride on the width dimension.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n assert len(p.filter_shape) == 4\n assert len(p.filter_stride) == 2\n assert all(x > 0 for x in p.filter_shape)\n assert all(x > 0 for x in p.filter_stride)\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n p = self.params\n w_pc = py_utils.WeightParams(\n shape=p.filter_shape,\n init=p.params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n self.CreateVariable('w', w_pc)\n\n def OutShape(self, in_shape):\n \"\"\"Compute the output shape given the input shape.\"\"\"\n p = self.params\n t_stride = p.filter_stride[0]\n f_stride = p.filter_stride[1]\n return tf.stack([\n in_shape[0], in_shape[1] * t_stride, in_shape[2] * f_stride,\n p.filter_shape[2]\n ])\n\n def _ApplyConv(self, theta, inputs):\n p = self.params\n w = theta.w\n strides = [1, p.filter_stride[0], p.filter_stride[1], 1]\n # TODO(miachen): remove casting once tf.nn.conv2d supports tf.float64.\n assert inputs.dtype == w.dtype\n dtype = inputs.dtype\n if dtype != tf.float32:\n inputs = tf.cast(inputs, tf.float32)\n w = tf.cast(w, tf.float32)\n # TODO(zhifengc): Try some better way to do Deconv. 
Search for\n # \"resize-convolution\".\n out = tf.nn.conv2d_transpose(\n inputs,\n w,\n output_shape=self.OutShape(tf.shape(inputs)),\n strides=strides,\n padding='SAME')\n if dtype != tf.float32:\n out = tf.cast(out, dtype)\n return py_utils.HasShape(out, [-1, -1, -1, p.filter_shape[2]])\n\n def FProp(self, theta, inputs):\n \"\"\"Apply deconvolution to inputs.\n\n Args:\n theta: A NestedMap object containing weights' values of this layer and its\n children layers.\n inputs: The inputs tensor. It is expected to be of shape [batch, height,\n width, channel].\n\n Returns:\n outputs. outputs is expected to have shape [batch, height * height_stride,\n width * width_stride, out_channel].\n \"\"\"\n p = self.params\n inputs = py_utils.HasShape(inputs, [-1, -1, -1, p.filter_shape[3]])\n return self._ApplyConv(theta, inputs)\n\n\n# A subset of activation functions are supported by TFLite as fused activation\n# functions with a preceding matmul or conv. If this is the case, then they\n# require special treatment for quantization.\n_TFLITE_FUSED_ACTIVATION_NAMES = (\n 'RELU',\n 'RELU6',\n)\n\nLOG_SCALE_CLAMP_BOUND = 20.0\n\n\nclass IdentityLayer(base_layer.BaseLayer):\n \"\"\"Identity layer, adds name and propagates its input.\"\"\"\n\n def FProp(self, theta, inputs, *args):\n \"\"\"Identity mapping.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: The input tensor or the input NestedMap.\n *args: Arguments to be ignored.\n\n Returns:\n Tensor with the same shape and type of inputs.\n \"\"\"\n p = self.params\n with tf.name_scope(p.name):\n return tf.nest.map_structure(tf.identity, inputs)\n\n @classmethod\n def FPropMeta(cls, p, inputs, *args):\n py_utils.CheckShapes((inputs,))\n return py_utils.NestedMap(flops=0, out_shapes=(inputs,))\n\n\n# TODO(yonghui/jonathanasdf): Remove the forwarded links.\n_ComputeConvOutputShape = conv_layers_with_time_padding.ComputeConvOutputShape\n_ComputeConvOutputPadding = (\n conv_layers_with_time_padding.ComputeConvOutputPadding)\nBatchNormLayer = bn_layers.BatchNormLayer\nBatchNormLayerNoPadding = bn_layers.BatchNormLayerNoPadding\nAddingAccumulator = bn_layers.AddingAccumulator\n\n\nclass BaseConv2DLayer(quant_utils.QuantizableLayer):\n \"\"\"Base class for 2D convolution layers.\n\n Has support for optional batch-normalization, activation and sequence\n padding.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'filter_shape', (0, 0, 0, 0),\n 'Filter shape. Must be a sequence of length 4. Elements are in'\n ' the order of height (time), width (frequency), in_channel,'\n ' out_channel. When causal_convolution is True, filter_shape[1]'\n ' is the actual number of trained weights in the time dimension'\n ' of the kernel.')\n p.Define(\n 'filter_stride', (0, 0),\n 'Filter stride to use. Must be a pair of ints. The first int'\n ' specifies the stride on the time dimension. The second int'\n ' specifies the stride on the frequency dimension.')\n p.Define(\n 'dilation_rate', (1, 1),\n 'If > 1, dilation rate for atrous convolution. '\n 'Must be a pair of ints. '\n 'The first int specifies the dilation rate on the time dimension. '\n 'The second int specifies the dilation rate on the frequency '\n 'dimension. '\n 'If any value of dilation_rate is > 1, then all values of strides '\n 'must be 1.')\n p.Define(\n 'activation', 'RELU',\n 'Activation function to use. 
Options are RELU, RELU6, SIGMOID, '\n 'TANH, NONE.')\n p.Define('bias', False, 'Whether or not to apply a bias before activation.')\n p.Define('batch_norm', True, 'Whether or not to apply batch norm.')\n p.Define(\n 'bn_decay', 0.999,\n 'Decay in updating the mean and variance moving average used in'\n ' batch normalization.')\n p.Define(\n 'bn_fold_weights', None,\n 'Fold the batch norm parameters into the convolution weights at '\n 'eval/inference time as per https://arxiv.org/pdf/1712.05877.pdf. '\n 'Requires that batch_norm be True and is incompatible with some other '\n 'parameters (conv_last=True).')\n p.Define(\n 'causal_convolution', False,\n 'If true, conv layer output only depends on time steps in'\n ' the past.')\n p.Define(\n 'conv_last', False,\n 'If true, apply the convolution transformation as the last step, '\n 'i.e., first apply batch normalization on the input, followed '\n 'by activation, and finally the convolution. '\n 'Otherwise, apply convolution first, followed by batch '\n 'normalization and activation. Not compatible with bn_fold_weights '\n 'or quantization.')\n p.Define(\n 'weight_norm', False,\n 'If true, apply weight normalization to weights as proposed by'\n ' Salimans and Kingma, 2016: https://arxiv.org/abs/1602.07868')\n p.Define(\n 'disable_activation_quantization', False,\n 'Disables the quantization tracking/clamping for the output '\n 'activation. This is most often used in conjunction with a concat '\n 'layer which needs to have a merged set of statistics.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n assert len(p.filter_shape) == 4\n assert len(p.filter_stride) == 2\n assert len(p.dilation_rate) == 2\n assert all(x > 0 for x in p.filter_stride)\n assert all(x > 0 for x in p.dilation_rate)\n if any(x > 1 for x in p.dilation_rate):\n assert all(x == 1 for x in p.filter_stride)\n # Bias is not needed with batch_norm=True.\n if p.batch_norm:\n assert not p.bias\n assert (p.activation == 'NONE' or activations.IsSupported(p.activation))\n\n if p.batch_norm:\n # batch normalization dimension is number of input channels\n # (filter_shape[2]) if we apply batch_norm on input and convolution\n # in the end, number of output channels otherwise.\n bn_dim = p.filter_shape[2] if p.conv_last else self.output_channels\n bn_params = BatchNormLayer.Params().Set(\n dim=bn_dim, decay=p.bn_decay, name=p.name, params_init=p.params_init)\n self.CreateChild('bn', bn_params)\n\n if self._is_bn_folded:\n assert p.batch_norm, 'bn_fold_weights requires batch_norm = True'\n assert not p.conv_last, 'bn_fold_weights requires conv_last = False'\n\n # TODO(yonghui): implement the variational noise logic.\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n p = self.params\n w_pc = py_utils.WeightParams(\n shape=p.filter_shape,\n init=p.params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n self.CreateVariable('w', w_pc)\n if p.bias:\n self.CreateVariable(\n 'b',\n py_utils.WeightParams(\n shape=[self.output_channels],\n init=py_utils.WeightInit.Constant(0.0),\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars']))\n if p.weight_norm:\n self.CreateVariable(\n 'g',\n py_utils.WeightParams(\n shape=self.filter_output_shape,\n init=py_utils.WeightInit.Constant(0.0),\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars']))\n\n if not p.disable_activation_quantization:\n self.TrackQTensor('activation')\n if (p.activation not in _TFLITE_FUSED_ACTIVATION_NAMES 
and\n p.activation != 'NONE'):\n self.TrackQTensor('pre_activation')\n\n def _CreateChildrenVariables(self):\n # Backwards compatibility: manually call child.InstantiateVariables()\n # outside of tf.variable_scope(p.name).\n if self.params.batch_norm:\n self.bn.InstantiateVariables()\n super()._CreateChildrenVariables()\n\n @property\n def output_channels(self):\n \"\"\"The number of output channels for this conv layer.\"\"\"\n # Normal convolution filter shape is [..., out_channels].\n p = self.params\n return p.filter_shape[-1]\n\n @property\n def filter_output_shape(self):\n \"\"\"Final dims of the filter corresponding to the output channels.\n\n Returns:\n A one (standard conv) or two (depthwise conv) element shape representing\n the final dimensions of the filter weights that are output channel\n specific for this layer. This shape is needed for any arithmetic that\n needs to convert between a linear list of filter weights and the\n arrangement in the actual filter.\n \"\"\"\n # Standard convolution has all output channels in the last dim.\n p = self.params\n return [p.filter_shape[-1]]\n\n @property\n def _is_bn_folded(self):\n \"\"\"Whether batchnorm folded weights are effectively enabled.\"\"\"\n p = self.params\n if not p.batch_norm:\n return False\n return (p.bn_fold_weights or\n (p.bn_fold_weights is None and p.qdomain.default is not None))\n\n def _EvaluateConvKernel(self, inputs, filter_w, strides, dilation_rate,\n padding_algorithm, data_format):\n \"\"\"Evaluates the lower level convolution kernel.\n\n Args:\n inputs: As to tf.nn.convolution.\n filter_w: As to tf.nn.depthwise_conv2d.\n strides: As to tf.nn.convolution.\n dilation_rate: As to tf.nn.convolution.\n padding_algorithm: As to tf.nn.convolution (padding argument).\n data_format: As to tf.nn.convolution.\n\n Returns:\n Convolution kernel output.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def OutputShape(cls, params, in_shape):\n return _ComputeConvOutputShape(in_shape, params.filter_stride[0],\n params.filter_stride[1],\n params.filter_shape[-1])\n\n def OutShape(self, in_shape):\n \"\"\"Compute the output shape given the input shape.\"\"\"\n p = self.params\n return _ComputeConvOutputShape(in_shape, p.filter_stride[0],\n p.filter_stride[1], self.output_channels)\n\n\n def _GetWeights(self,\n theta,\n convolution_lambda,\n folded_bn_padding,\n cast_dtype=None):\n \"\"\"Gets a dictionary of weights and biases for the convolution.\n\n This is necessary for some operating modes where the weights are fused\n with batch normalization differently for training vs eval.\n\n Args:\n theta: A `.NestedMap` object containing underlying weights values of this\n layer and its children layers.\n convolution_lambda: Lambda which takes the convolution weights and runs\n the convolution.\n folded_bn_padding: Padding to apply to folded batch normalization moment\n computation (or None for no padding).\n cast_dtype: If not None, cast weights to the given dtype.\n\n Returns:\n Tuple of (filter, biases).\n \"\"\"\n p = self.params\n\n # Original weights.\n filter_w = theta.w\n filter_output_shape = self.filter_output_shape\n # TODO(miachen): remove casting once tf.nn.conv2d supports tf.float64.\n if cast_dtype:\n filter_w = tf.cast(filter_w, tf.float32)\n if p.weight_norm:\n if len(filter_output_shape) == 1:\n # Normalize along the last dim (standard conv).\n filter_w = tf.nn.l2_normalize(filter_w, [0, 1, 2]) * tf.reshape(\n (theta.g + 1.0), [1, 1, 1, p.filter_shape[-1]])\n elif len(filter_output_shape) == 2:\n # Normalize 
along the last two dimensions (depthwise conv).\n filter_w = tf.nn.l2_normalize(filter_w, [0, 1]) * tf.reshape(\n (theta.g + 1.0), [1, 1] + filter_output_shape)\n else:\n assert False, 'Unsupported weight norm filter shape'\n\n # Original bias.\n if p.bias:\n b = theta.b\n else:\n b = tf.zeros([symbolic.ToStatic(self.output_channels)],\n dtype=filter_w.dtype)\n\n # Pass-through if weights are not folded with batch normalization.\n if not self._is_bn_folded:\n return filter_w, b\n\n # If batch norm is fused with weights, then compute the weights as from\n # figure C.8 of https://arxiv.org/pdf/1712.05877.pdf for training and\n # figure C.6 for eval.\n if self.do_eval:\n # Gets current moments without updating.\n mean, variance, beta, gamma = self.bn.GetCurrentMoments(theta.bn)\n else:\n # Updates moments based on a trial run of the convolution.\n raw_conv_output = convolution_lambda(filter_w)\n mean, variance, beta, gamma = self.bn.ComputeAndUpdateMoments(\n theta.bn, raw_conv_output, folded_bn_padding)\n\n # Fold weights and bias. Note that this layer's bias is not used (not\n # applicable for batch norm case).\n sigma_recip = tf.math.rsqrt(variance + self.bn.epsilon)\n scale_correction = gamma * sigma_recip\n # Normal conv will have all weights in the last dim\n # ([_, _, _, output_channels]), which matches the 1D layout from\n # batch norm. Depthwise uses the last two dims so reshape\n # ([_, _, in_c, c_multiplier]).\n scale_correction = tf.reshape(scale_correction, filter_output_shape)\n filter_w = filter_w * scale_correction\n b = (beta - (gamma * mean * sigma_recip))\n return filter_w, b\n\n def _ApplyConv(self, theta, inputs, folded_bn_padding=None):\n p = self.params\n strides = [p.filter_stride[0], p.filter_stride[1]]\n dtype = inputs.dtype\n cast_dtype = None\n if dtype != tf.float32:\n cast_dtype = tf.float32\n inputs = tf.cast(inputs, cast_dtype)\n\n padding_algorithm = 'SAME'\n if p.causal_convolution:\n # Causal convolution is only applied in time (height) dimension.\n # Use VALID padding and shift the inputs to the right to ensure that the\n # first output only depends on the first input and so on. The output is\n # the same size as the input, as if the convolution used SAME padding.\n padding_algorithm = 'VALID'\n # The effective spatial filter size for dilated convolutions is\n # (kernel - 1) * dilation_rate + 1 as according to\n # https://www.tensorflow.org/api_docs/python/tf/nn/convolution.\n causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0]\n\n # Apply padding in width dimension to mimic SAME padding.\n # Using the similar logic as above to produce the same number of output\n # as if SAME padding is used.\n width_pad_size = (p.filter_shape[1] - 1) * p.dilation_rate[1]\n\n # The amount of padding on the left is tricky. 
If stride > 1, total\n # padding required for SAME padding would be:\n # pad = ceil(input_size / stride - 1) * stride + eff_kernel - input_size\n # where eff_kernel = (kernel - 1) * dilation_rate + 1\n # TensorFlow also pads more on the right / bottom side if total padding\n # required is an odd number, so pad_left = pad // 2\n # Therefore pad_left could depend on input size, which might be dynamic.\n # Here we only handle two special cases where 1) stride = 1, then\n # pad_left = (eff_kernel - 1) // 2\n # and 2) kernel = 1, then\n # pad_left = 0\n if p.filter_stride[1] > 1 and p.filter_shape[1] > 1:\n raise ValueError('Causal convolution only supports width stride = 1 '\n 'or filter width = 1.')\n width_pad_left = max(0, width_pad_size - 1) // 2\n width_pad_right = width_pad_size - width_pad_left\n inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0],\n [width_pad_left, width_pad_right], [0, 0]])\n\n # Lambda for computing the actual convolution.\n def ComputeRawConvolution(filter_w):\n return self._EvaluateConvKernel(\n inputs,\n filter_w=filter_w,\n strides=strides,\n dilation_rate=p.dilation_rate,\n data_format='NHWC',\n padding_algorithm=padding_algorithm)\n\n filter_w, b = self._GetWeights(\n theta, ComputeRawConvolution, folded_bn_padding, cast_dtype=cast_dtype)\n\n # TODO(miachen): remove casting once tf.nn.conv2d supports tf.float64.\n assert inputs.dtype == filter_w.dtype\n\n filter_w = self.QWeight(filter_w)\n out = ComputeRawConvolution(filter_w)\n\n # Note that we always apply the bias (which may be zero) because some\n # normalization mechanisms do implicitly produce a bias.\n b = tf.cast(b, tf.float32)\n out = tf.nn.bias_add(out, b)\n\n if dtype != tf.float32:\n out = tf.cast(out, dtype)\n return out\n\n def FProp(self, theta, inputs, paddings=None,*args):\n \"\"\"Apply convolution to inputs.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: The inputs tensor. It is expected to be of shape [batch, time,\n frequency, channel]. The time dimension corresponds to the height\n dimension as in images and the frequency dimension corresponds to the\n width dimension as in images.\n paddings: The paddings tensor. If None, the inputs have no paddings in the\n sense of sequence training (e.g., in CNN models). Otherwise, it is\n expected to be of shape [batch, time].\n\n Returns:\n outputs, out_paddings pair.\n \"\"\"\n p = self.params\n if paddings is None:\n inputs = py_utils.with_dependencies([\n py_utils.assert_shape_match(\n tf.shape(inputs), [-1, -1, -1, p.filter_shape[2]])\n ], inputs)\n else:\n inputs = py_utils.with_dependencies([\n py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]),\n py_utils.assert_shape_match(\n tf.shape(inputs),\n tf.concat([tf.shape(paddings), [-1, p.filter_shape[2]]], 0))\n ], inputs)\n # Zeroing out padded inputs.\n qpadding = self.QRPadding(\n tf.expand_dims(tf.expand_dims(paddings, -1), -1))\n # Select based padding is required for quantized inference but is\n # causing regressions on other platforms. TODO: Remove use_select\n # attribute when root-caused/resolved.\n inputs = py_utils.ApplyPadding(\n qpadding,\n inputs,\n use_select=p.is_inference and p.qdomain.default is not None)\n\n with tf.name_scope(p.name):\n input_shape = tf.shape(inputs)\n\n if paddings is None:\n conv_padding = None\n else:\n # NOTE: this may be slightly inaccurate when p.dilation_rate[0] > 1.\n # But there's likely no real problems. 
Trying to set it gives an error:
        # pooling with SAME padding is not implemented for dilation_rate > 1.
        # NOTE: window=p.filter_stride[0] means output i will be padded if any
        # input in the stride between the two conv centers is padded.
        conv_padding = _ComputeConvOutputPadding(
            paddings, window=p.filter_stride[0], stride=p.filter_stride[0])

      if p.conv_last:
        out = self._ComputeConvLast(theta, inputs, paddings, conv_padding)
      else:
        out = self._Compute(theta, inputs, paddings, conv_padding)

      # Lastly zeroing out padded states.
      if conv_padding is not None:
        qpadding = self.QRPadding(
            tf.expand_dims(tf.expand_dims(conv_padding, -1), -1))
        # Select based padding is required for quantized inference but is
        # causing regressions on other platforms. TODO: Remove use_select
        # attribute when root-caused/resolved.
        out = py_utils.ApplyPadding(
            qpadding,
            out,
            use_select=p.is_inference and p.qdomain.default is not None)

      out = py_utils.HasShape(
          out, symbolic.ToStatic(BaseConv2DLayer.OutShape(self, input_shape)))
      return out, conv_padding

  def _Compute(self, theta, inputs, paddings, conv_padding):
    """Computes the forward prop (conv, bn, act)."""
    p = self.params

    bn_padding = conv_padding
    if bn_padding is None:
      bn_padding_expanded = None
    else:
      batch_time = tf.shape(bn_padding)
      batch_time_any_any = tf.concat([batch_time, [-1, -1]], 0)
      bn_padding_expanded = tf.reshape(bn_padding,
                                       tf.concat([batch_time, [1, 1]], 0))

    out = self._ApplyConv(theta, inputs, bn_padding_expanded)
    if bn_padding is not None:
      out = py_utils.with_dependencies([
          py_utils.assert_shape_match(batch_time, [-1, -1]),
          py_utils.assert_shape_match(tf.shape(out), batch_time_any_any)
      ], out)

    # Only apply batch norm if it was not folded into the weights.
    if p.batch_norm and not p.bn_fold_weights:
      out = self.bn.FProp(theta.bn, out, bn_padding_expanded)

    # Apply activation.
    if p.activation != 'NONE':
      if p.activation not in _TFLITE_FUSED_ACTIVATION_NAMES:
        out = self.QTensor('pre_activation', out)
      out = activations.GetFn(p.activation)(out)
      if not p.disable_activation_quantization:
        out = self.QTensor('activation', out)

    return out

  def _ComputeConvLast(self, theta, inputs, paddings, conv_padding):
    """Computes the forward prop in conv_last mode (bn, act, conv)."""
    p = self.params
    out = inputs
    out_padding = paddings

    if p.batch_norm:
      if out_padding is None:
        out_padding_expanded = None
      else:
        batch_time = tf.shape(out_padding)
        batch_time_any_any = tf.concat([batch_time, [-1, -1]], 0)
        out = py_utils.with_dependencies([
            py_utils.assert_shape_match(batch_time, [-1, -1]),
            py_utils.assert_shape_match(tf.shape(out), batch_time_any_any)
        ], out)
        out_padding_expanded = tf.reshape(out_padding,
                                          tf.concat([batch_time, [1, 1]], 0))
      out = self.bn.FProp(theta.bn, out, out_padding_expanded)

    if p.activation != 'NONE':
      out = activations.GetFn(p.activation)(out)

    out = self._ApplyConv(theta, out)

    return out


class Conv2DLayer(BaseConv2DLayer):
  """Convolution layer, with optional batch-normalization and activation."""

  def _EvaluateConvKernel(self, inputs, filter_w, strides, dilation_rate,
                          padding_algorithm, data_format):
    p = self.params
    return tf.nn.convolution(
        inputs,
        filter_w,
        strides=strides,
        dilations=p.dilation_rate,
        data_format='NHWC',
padding=padding_algorithm)\n @classmethod #added fix to include meta flops for conv2d\n def FPropMeta(cls, p, inputs,*args):\n py_utils.CheckShapes((inputs,))\n temp = BaseConv2DLayer(p)\n shape = temp.OutShape(inputs)\n outputs = tshape.Shape(shape)\n return py_utils.NestedMap(flops=1, out_shapes=(outputs,))\n\n\nclass ConvNN2DLayer(BaseConv2DLayer):\n \"\"\"Convolution layer, based on tf.nn.conv2d instead of tf.nn.convolution.\n\n tf.nn.convolution is using a different implementation on atrous convolutions,\n by wrapping the actual convolution with space_to_batch and batch_to_space.\n This implementation is not supported in tflite conversion, hence we need\n a different layer for using atrous convolutions.\n \"\"\"\n\n def _EvaluateConvKernel(self, inputs, filter_w, strides, dilation_rate,\n padding_algorithm, data_format):\n p = self.params\n return tf.nn.conv2d(\n inputs,\n filter_w,\n strides=strides,\n dilations=p.dilation_rate,\n data_format='NHWC',\n padding='SAME')\n\n\n# Alias of Conv2DLayer (for compatibility with historical uses).\nConvLayer = Conv2DLayer\n\n\nclass DepthwiseConv2DLayer(BaseConv2DLayer):\n \"\"\"Depthwise conv 2D layer.\n\n paper: https://arxiv.org/abs/1610.02357\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n # Redefine 'filter_shape' since the semantic of shape elements is different\n # from regular Conv2D.\n p.Delete('filter_shape')\n p.Define(\n 'filter_shape', (0, 0, 0, 0),\n 'Filter shape. Must be a sequence of length 4. Elements are in'\n ' the order of height (time), width (frequency), in_channel,'\n ' channel_multipliers. ')\n return p\n\n @property\n def output_channels(self):\n \"\"\"The number of output channels for this conv layer.\"\"\"\n p = self.params\n # Depthwise convolution filter shape is:\n # [..., in_channels, channel_multiplier].\n return p.filter_shape[-2] * p.filter_shape[-1]\n\n @property\n def filter_output_shape(self):\n \"\"\"Final dims of the filter corresponding to the output channels.\"\"\"\n # Depthwise convolution uses the final two dims for output channels.\n p = self.params\n _, _, in_c, c_mul = p.filter_shape\n return [in_c, c_mul]\n\n def _EvaluateConvKernel(self, inputs, filter_w, strides, dilation_rate,\n padding_algorithm, data_format):\n p = self.params\n return tf.nn.depthwise_conv2d(\n inputs,\n filter=filter_w,\n strides=[1, strides[0], strides[1], 1],\n dilations=p.dilation_rate,\n data_format='NHWC',\n padding=padding_algorithm)\n\n\nclass SeparableConv2DLayer(Conv2DLayer):\n \"\"\"Separable 2D convolution.\n\n This class aggregates a DepthwiseConv2DLayer that feeds in to the point\n wise convolution defined by this layer. Since the point wise convolution\n controls the output, this class is defined in terms of that and delegates\n to a depthwise sub-layer.\n\n The `filter_shape` parameter is rewritten on initialization from the form:\n (h, w, cin, cout)\n To:\n Depthwise filter: (h, w, cin, p.depth_multiplier)\n Pointwise filter (on this instance): (1, 1, cin * p.depth_multiplier, cout)\n\n This way, the layer is configured as if it were a normal 2D convolution\n but is internally reconfigured to be separable.\n\n paper: https://arxiv.org/abs/1610.02357\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'depth_multiplier', 1,\n 'Number of depthwise convolution output channels per input channel. 
'\n 'The total number of depthwise convolution output channels will be.'\n 'equal to in_channel * depth_multiplier.')\n p.Define('depthwise_tpl',\n DepthwiseConv2DLayer.Params().Set(activation='NONE'),\n 'Template for the depthwise conv sub-layer.')\n return p\n\n def __init__(self, params):\n # Rewrite the filter.\n params = params.Copy()\n h, w, cin, cout = params.filter_shape\n params.filter_shape = (1, 1, cin * params.depth_multiplier, cout)\n depthwise_filter_shape = (h, w, cin, params.depth_multiplier)\n\n # Dilation rate and stride go to the depthwise layer and reset ours.\n depthwise_filter_stride = params.filter_stride\n depthwise_dilation_rate = params.dilation_rate\n params.filter_stride = (1, 1)\n params.dilation_rate = (1, 1)\n\n super().__init__(params)\n p = self.params\n del params\n\n # Create the depthwise sub-layer.\n depthwise_params = p.depthwise_tpl.Copy().Set(\n filter_shape=depthwise_filter_shape,\n filter_stride=depthwise_filter_stride,\n dilation_rate=depthwise_dilation_rate,\n causal_convolution=p.causal_convolution,\n weight_norm=p.weight_norm,\n batch_norm=p.batch_norm,\n bn_decay=p.bn_decay,\n bn_fold_weights=p.bn_fold_weights)\n depthwise_params.qdomain.default = p.qdomain.default\n self.CreateChild('depthwise_conv', depthwise_params)\n\n def FProp(self, theta, inputs, paddings=None):\n inputs, paddings = self.depthwise_conv.FProp(theta.depthwise_conv, inputs,\n paddings)\n return super().FProp(theta, inputs, paddings)\n\n def OutShape(self, in_shape):\n \"\"\"Compute the output shape given the input shape.\"\"\"\n in_shape = self.depthwise_conv.OutShape(in_shape)\n return super().OutShape(in_shape)\n\n\nclass ProjectionLayer(quant_utils.QuantizableLayer):\n \"\"\"Projection layer, with batch normalization and relu activation.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('input_dim', 0, 'Depth of the input.')\n p.Define('output_dim', 0, 'Depth of the output.')\n p.Define(\n 'activation', 'RELU',\n 'Activation function to use. Options are RELU, RELU6, SIGMOID, '\n 'TANH, NONE.')\n p.Define('batch_norm', None, 'Whether or not to apply batch norm.')\n p.Define('has_bias', False,\n 'Whether or not to introduce the bias params to the layer.')\n p.Define('bias_init', 0.0, 'Initial value for the bias')\n p.Define(\n 'affine_last', False,\n 'If true, apply the affine transformation as the last step, i.e., '\n 'first apply batch normalization on the input, followed '\n 'by activation, and finally the affine transformation. '\n 'Otherwise, apply affine transformation first, followed by batch '\n 'normalization and activation.')\n p.Define(\n 'weight_norm', False,\n 'If true, apply weight normalization to weights as proposed by'\n ' Salimans and Kingma, 2016: https://arxiv.org/abs/1602.07868')\n p.Define(\n 'bn_fold_weights', None,\n 'Fold the batch norm parameters into the convolution weights at '\n 'eval/inference time as per https://arxiv.org/pdf/1712.05877.pdf. '\n 'Defaults to None which means that it will be disabled by default '\n 'and enabled when quantized training is enabled. Not compatible with '\n 'affine_last=True')\n p.Define('bn_params',\n BatchNormLayer.Params().Set(decay=0.999),\n 'Default params for batch norm layer.')\n p.Define('apply_pruning', False,\n 'Whether to prune the weights while training')\n p.Define(\n 'use_einsum', True, 'Whether to use tf.einsum for optimizing '\n 'computations. When this is set to False, this causes an increase in '\n 'TPU memory usage (b/158336491). 
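# --- Illustrative aside (not part of the layer code) -----------------------
# Parameter-count intuition for the separable rewrite above, with
# hypothetical sizes: a full (h, w, cin, cout) filter versus the depthwise
# (h, w, cin, depth_multiplier) plus pointwise (1, 1, cin * multiplier, cout)
# pair that SeparableConv2DLayer constructs.
def separable_param_counts(h, w, cin, cout, depth_multiplier=1):
  full = h * w * cin * cout
  depthwise = h * w * cin * depth_multiplier
  pointwise = cin * depth_multiplier * cout
  return full, depthwise + pointwise

full, separable = separable_param_counts(3, 3, 64, 128)
assert (full, separable) == (73728, 8768)  # roughly 8x fewer parameters
# ---------------------------------------------------------------------------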
When this is set to True, it might '\n ' cause problems with model quantization for on device inference '\n '(b/146421936)')\n p.Define(\n 'use_blocked_matmul', False, 'Whether to use blocked matrix '\n 'multiplications. This allows for weight updates to be paralellized'\n ' across the cores for Shampoo optimizer.')\n p.Define('block_dim', 1024, 'Dimension of the block')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n assert symbolic.EvalExpr(symbolic.STATIC_VALUES, p.input_dim) > 0\n assert symbolic.EvalExpr(symbolic.STATIC_VALUES, p.output_dim) > 0\n assert p.activation == 'NONE' or activations.IsSupported(p.activation)\n if p.batch_norm is None:\n raise RuntimeError(\n 'ProjectionLayer.batch_norm not set explicitly for %s' % self.path)\n if p.batch_norm and p.has_bias:\n tf.logging.warning(\n 'Projection layer enables both batch_norm and has_bias. '\n 'This is generally redundant/wasteful and may introduce '\n 'accuracy problems in some inference scenarios.')\n if self._is_bn_folded:\n assert not p.use_blocked_matmul, (\n 'bn_fold_weights requires use_blocked_matmul = False')\n assert not p.affine_last, (\n 'Folded batchnorm is not compatible with affine_last')\n\n if p.use_einsum:\n assert not p.use_blocked_matmul, (\n 'use_einsum requires use_blocked_matmul = False')\n\n if p.batch_norm:\n bn_params = p.bn_params.Copy()\n bn_params.name = p.name\n bn_params.dim = p.input_dim if p.affine_last else p.output_dim\n\n self.CreateChild('bn', bn_params)\n # TODO(yonghui): implement the variational noise logic.\n\n def _GetBlockedMatMulInputOutputMultipliers(self):\n \"\"\"Get number of input and output blocks.\"\"\"\n p = self.params\n # Number of input and output blocks.\n w_im = p.input_dim // p.block_dim\n w_om = p.output_dim // p.block_dim\n # Add padding if input_dim / output_dim is not divisible by block_dim.\n if p.input_dim % p.block_dim != 0:\n w_im += 1\n if p.output_dim % p.block_dim != 0:\n w_om += 1\n return w_im, w_om\n\n def _GetBlockedWeightMatrix(self, w):\n \"\"\"Returns a 3D weight matrix for blocked matmul.\"\"\"\n p = self.params\n # w is 3D Tensor of shape [i * o, block_dim, block_dim] such that\n # i * block_dim = num_inputs (modulo padding).\n # j * block_dim = num_outputs\n #\n # To efficiently apply forward prop, we transpose and reshape w into\n # shape [i * block_dim, o, block_dim]\n w_im, w_om = self._GetBlockedMatMulInputOutputMultipliers()\n block_dim = p.block_dim\n w_4d = tf.reshape(w, [w_im, w_om, block_dim, block_dim])\n # Transpose to [i, block_dim, o, block_dim].\n w_4d_t = tf.transpose(w_4d, [0, 2, 1, 3])\n w = tf.reshape(w_4d_t, [w_im * block_dim, w_om, block_dim])\n # Slice out padding from the weight matrix.\n if p.input_dim % p.block_dim != 0:\n w = tf.slice(w, [0, 0, 0], [p.input_dim, w_om, block_dim])\n return w\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n p = self.params\n if p.use_blocked_matmul:\n w_im, w_om = self._GetBlockedMatMulInputOutputMultipliers()\n w_pc = py_utils.WeightParams(\n shape=[w_im * w_om, p.block_dim, p.block_dim],\n init=p.params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n else:\n w_pc = py_utils.WeightParams(\n shape=[p.input_dim, p.output_dim],\n init=p.params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n\n if p.apply_pruning:\n mask_w_pc = py_utils.WeightParams(w_pc.shape,\n py_utils.WeightInit.Constant(1.0),\n p.dtype)\n threshold_w_pc = py_utils.WeightParams([],\n 
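# --- Illustrative aside (not part of the layer code) -----------------------
# Numpy check, with hypothetical block sizes, of the blocked-matmul layout
# used by _GetBlockedWeightMatrix above: reshaping the [i*o, bd, bd] blocks
# to [i*bd, o, bd] and contracting with einsum('bn,nmk->bmk') matches an
# ordinary dense matmul against the equivalent [in_dim, out_dim] matrix.
import numpy as np

rng = np.random.default_rng(0)
bd, w_im, w_om = 4, 2, 3                  # block_dim, input/output blocks
in_dim, out_dim = bd * w_im, bd * w_om
w_blocks = rng.normal(size=(w_im * w_om, bd, bd))

w4 = w_blocks.reshape(w_im, w_om, bd, bd)
w = w4.transpose(0, 2, 1, 3).reshape(w_im * bd, w_om, bd)
dense = w4.transpose(0, 2, 1, 3).reshape(in_dim, out_dim)

x = rng.normal(size=(5, in_dim))
blocked = np.einsum('bn,nmk->bmk', x, w).reshape(5, out_dim)
assert np.allclose(blocked, x @ dense)
# ---------------------------------------------------------------------------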
py_utils.WeightInit.Constant(0.0),\n tf.float32)\n if p.has_bias:\n b_pc = py_utils.WeightParams(\n shape=[p.output_dim],\n init=py_utils.WeightInit.Constant(scale=p.bias_init),\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n if p.weight_norm:\n g_pc = py_utils.WeightParams(\n shape=[p.output_dim],\n init=py_utils.WeightInit.Constant(0.0),\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n\n weights_var_name = 'w'\n if p.apply_pruning:\n mask_var_name = 'mask'\n threshold_var_name = 'threshold'\n self.CreateVariable(\n mask_var_name, mask_w_pc, theta_fn=None, trainable=False)\n self.CreateVariable(\n threshold_var_name, threshold_w_pc, theta_fn=None, trainable=False)\n\n def MaskWeightFn(weight):\n return tf.multiply(\n self.AddGlobalVN(weight), getattr(self.vars, mask_var_name),\n 'masked_w')\n\n self.CreateVariable(weights_var_name, w_pc, theta_fn=MaskWeightFn)\n pruning_utils.AddToPruningCollections(\n getattr(self.vars, weights_var_name), getattr(self.vars,\n mask_var_name),\n getattr(self.vars, threshold_var_name))\n else:\n self.CreateVariable(weights_var_name, w_pc)\n\n if p.has_bias:\n self.CreateVariable('b', b_pc)\n if p.weight_norm:\n self.CreateVariable('g', g_pc)\n\n # Determine quantization needs based on whether fusing activation\n # or not.\n self._pre_activation_qt_name = None\n self._output_qt_name = ('activation'\n if p.activation != 'NONE' else 'affine_matmul')\n if (p.activation != 'NONE' and\n p.activation not in _TFLITE_FUSED_ACTIVATION_NAMES):\n # Not a fused activation function.\n # Need a qtensor to track the pre-activation tensor. The name is\n # compatible with older checkpoints.\n self._pre_activation_qt_name = 'affine_matmul'\n self.TrackQTensor(self._output_qt_name)\n if self._pre_activation_qt_name:\n self.TrackQTensor(self._pre_activation_qt_name)\n\n def _CreateChildrenVariables(self):\n # Backwards compatibility: manually call child.InstantiateVariables()\n # outside of tf.variable_scope(p.name).\n if self.params.batch_norm:\n self.bn.InstantiateVariables()\n super()._CreateChildrenVariables()\n\n @classmethod\n def NumOutputNodes(cls, p):\n return p.output_dim\n\n @property\n def output_qt_name(self):\n \"\"\"Name of QTensor used for the output value.\n\n Useful for grabbing the quantization of the output.\n\n Returns:\n String name of output qtensor.\n \"\"\"\n return self._output_qt_name\n\n def FProp(self, theta, inputs, paddings=None):\n \"\"\"Apply projection to inputs.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: The inputs tensor. Shaped [..., input_dim].\n paddings: The paddings tensor. Shaped [..., 1], where all but the last\n dimension match.\n\n Returns:\n Output after applying projection, and optionally batch normalization and\n relu non-linearity.\n \"\"\"\n p = self.params\n arry = inputs.shape\n prod = 1\n for i in range(1,len(arry)):\n prod *=arry[i]\n batchsize = arry[0]\n arry2 = [batchsize,prod]\n inputs = tf.reshape(inputs,(batchsize,prod))\n with tf.name_scope(p.name):\n if paddings is None:\n paddings = tf.zeros(\n tf.concat([py_utils.GetShape(inputs)[:-1], [1]], axis=0),\n dtype=inputs.dtype)\n w, b = self._GetWeights(theta, inputs, paddings)\n w = self.AqtWeight(w, feature_axis=-1)\n w = self.QWeight(w)\n\n if p.affine_last:\n # Reversed computation. 
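# --- Illustrative aside (not part of the layer code) -----------------------
# A small numpy sketch of the (g + 1) * l2_normalize weight normalization
# used by ProjectionLayer._GetWeights below; shapes are hypothetical. Each
# output column of w is rescaled to unit L2 norm, then multiplied by a
# learned per-output gain (g + 1), so columns start at unit norm when g = 0.
import numpy as np

rng = np.random.default_rng(0)
w = rng.normal(size=(16, 4))              # [input_dim, output_dim]
g = rng.normal(size=(4,))

w_wn = (g + 1.0) * (w / np.linalg.norm(w, axis=0, keepdims=True))
assert np.allclose(np.linalg.norm(w_wn, axis=0), np.abs(g + 1.0))
# ---------------------------------------------------------------------------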
Does not handle folding.\n out = inputs\n if p.batch_norm:\n out = self.bn.FProp(theta.bn, out, paddings)\n if p.activation != 'NONE':\n if not p.is_inference:\n out = py_utils.CheckNumerics(out)\n out = activations.GetFn(p.activation)(out)\n out = self._ApplyProjectionKernel(w, b, out, with_activation=False)\n else:\n # Normal ordered projection.\n if self._is_bn_folded or not p.batch_norm:\n # Everything folded together. This is the only variant that supports\n # quantization.\n out = self._ApplyProjectionKernel(w, b, inputs, quant=True)\n else:\n # Projection kernel(no activation fn) -> BN -> Activation fn.\n out = self._ApplyProjectionKernel(w, b, inputs, with_activation=False)\n if p.batch_norm:\n out = self.bn.FProp(theta.bn, out, paddings)\n if p.activation != 'NONE':\n if not p.is_inference:\n out = py_utils.CheckNumerics(out)\n out = activations.GetFn(p.activation)(out)\n return py_utils.ApplyPadding(self.QRPadding(paddings), out)\n\n @property\n def _is_bn_folded(self):\n \"\"\"Whether batchnorm folded weights are effectively enabled.\"\"\"\n p = self.params\n if not p.batch_norm:\n return False\n return (p.bn_fold_weights or\n (p.bn_fold_weights is None and p.qdomain.default is not None))\n\n def _GetWeights(self, theta, inputs, paddings):\n \"\"\"Gets the weights for the computation.\n\n Weights will always have weight_norm applied and may have batch_norm\n folded if enabled.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: Inputs (needed for batchnorm folding).\n paddings: Paddings (needed for batchnorm folding).\n\n Returns:\n Tuple of (w, b) to use for the forward pass. b may be None if bias is\n disabled.\n \"\"\"\n p = self.params\n w = theta.w\n b = theta.b if p.has_bias else None\n if p.use_blocked_matmul:\n w = self._GetBlockedWeightMatrix(w)\n if p.weight_norm:\n w = tf.nn.l2_normalize(w, 0)\n else:\n if p.weight_norm:\n w = tf.reshape((theta.g + 1.0) * tf.nn.l2_normalize(w, [0]),\n py_utils.ToStaticShape([p.input_dim, p.output_dim]))\n\n if not self._is_bn_folded:\n return w, b\n\n # If batch norm is fused with weights, then compute the weights as from\n # figure C.8 of https://arxiv.org/pdf/1712.05877.pdf for training and\n # figure C.6 for eval.\n if self.do_eval:\n # Gets current moments without updating.\n mean, variance, beta, gamma = self.bn.GetCurrentMoments(theta.bn)\n else:\n # Updates moments based on a trial run of the kernel (without activation\n # function).\n raw_output = self._ApplyProjectionKernel(\n w, b, inputs, with_activation=False)\n mean, variance, beta, gamma = self.bn.ComputeAndUpdateMoments(\n theta.bn, raw_output, paddings)\n\n # Fold weights and bias.\n sigma_recip = tf.math.rsqrt(variance + self.bn.epsilon)\n scale_correction = gamma * sigma_recip\n w = w * scale_correction\n b = beta - (gamma * mean * sigma_recip)\n return w, b\n\n def _ApplyProjectionKernel(self,\n w,\n b,\n inputs,\n with_activation=True,\n quant=False,\n bn=False):\n \"\"\"Applies matmul/bias/activation in one step.\n\n Note that it is important that these three ops be computed in this way as\n downstream inference engines (esp. for quantized inference) can recognize\n and fuse them. 
For floating point, this is an optimization, but for
    quantization, it is required.

    Args:
      w: Weight matrix.
      b: Bias vector (or None).
      inputs: FProp inputs.
      with_activation: Whether to also compute the activation function.
      quant: Whether to apply quantization.
      bn: Apply batchnorm.

    Returns:
      Output tensor reshaped.
    """
    p = self.params
    if not p.use_blocked_matmul:
      if p.use_einsum:
        out = py_utils.ProjectLastDim(inputs, w, p.input_dim, p.output_dim)
      else:
        out = py_utils.Matmul(
            tf.reshape(inputs, py_utils.ToStaticShape([-1, p.input_dim])), w)
    else:
      x = tf.reshape(inputs, py_utils.ToStaticShape([-1, p.input_dim]))
      out = tf.einsum('bn,nmk->bmk', x, w)
      # Create an output layer [b, num_outputs].
      bsz = py_utils.GetShape(out)[0]
      out = tf.reshape(out, [bsz, -1])
      if p.output_dim % p.block_dim != 0:
        out_shape = [bsz, p.output_dim]
        out = tf.slice(out, [0, 0], out_shape)

    if b is not None:
      out += b  # NOTE: Bias on matmul is never quantized.
    return self._ApplyActivationFunction(out, inputs, with_activation, quant)

  def _ApplyActivationFunction(self,
                               out,
                               inputs,
                               with_activation=True,
                               quant=False):
    """Applies the activation function in one step.

    Args:
      out: The result of applying the weight matrix (and bias) to the inputs.
      inputs: FProp inputs.
      with_activation: Whether to also compute the activation function.
      quant: Whether to apply quantization.

    Returns:
      Output tensor reshaped.
    """
    p = self.params
    if with_activation and p.activation != 'NONE':
      if self._pre_activation_qt_name:
        # Track quantization for unfused activation function.
        out = self.QTensor(self._pre_activation_qt_name, out)
      if not p.is_inference:
        out = py_utils.CheckNumerics(out)
      out = activations.GetFn(p.activation)(out)
    if quant:
      out = self.QTensor(self._output_qt_name, out)
    if not p.use_einsum:
      out = tf.reshape(
          out,
          tf.concat([
              py_utils.GetShape(inputs)[:-1],
              py_utils.ToStaticShape([p.output_dim])
          ],
                    axis=0))
    return out

  @classmethod
  def FPropMeta(cls, p, inputs, paddings=None):
    py_utils.CheckShapes((inputs,))
    # Flatten all non-batch dims, mirroring the reshape done in FProp.
    dims = inputs._shape  # pylint: disable=protected-access
    prod = 1
    for i in range(1, len(dims)):
      prod *= dims[i]
    inputs = tshape.Shape([dims[0], prod])
    assert inputs[-1] == p.input_dim
    flops = 0
    in_dim = inputs[-1]
    other_dims = inputs.num_elements() / in_dim
    # matmuls.
    flops += other_dims * p.input_dim * p.output_dim * 2
    # activations.
    flops += other_dims * p.output_dim * activations.GetFlops(p.activation)
    if p.has_bias:
      flops += p.output_dim
    out_shape = tshape.Shape(inputs[:-1] + [p.output_dim])
    if p.batch_norm:
      bn_meta = p.bn_params.cls.FPropMeta(
          p.bn_params.Copy().Set(dim=p.output_dim), out_shape)
      flops += bn_meta.flops
    if p.weight_norm:
      # l2 normalize + element-wise multiply.
      flops += 2 * p.input_dim + 2 * p.input_dim * p.output_dim + 2
    return py_utils.NestedMap(flops=flops, out_shapes=(out_shape,))


class FCLayer(ProjectionLayer):
  """Fully-connected layer (matmul + bias + optional activation)."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.batch_norm = False
    p.has_bias = True
    return p

  @classmethod
  def FPropMeta(cls, p, inputs, paddings=None, *args):
    # Identical to ProjectionLayer.FPropMeta; extra positional args are
    # accepted for call-site compatibility but unused.
    return super().FPropMeta(p, inputs, paddings)

  def FProp(self, theta, inputs, paddings=None, *args):
    # Identical to ProjectionLayer.FProp; extra positional args are
    # accepted for call-site compatibility but unused.
    return super().FProp(theta, inputs, paddings)


class FeedForwardNet(quant_utils.QuantizableLayer):
  """A simple multi-layer feedforward network.

  This class represents a stack of fully-connected feedforward layers. Each
  layer in the network can be configured for whether or not to have batch-norm
  applied to its output, its activation function, and whether or not to apply
  dropout to post-activation output.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('input_dim', 0, 'Depth of the input to the network.')
    p.Define('hidden_layer_dims', [], 'Depth of the hidden layer outputs.')
    p.Define(
        'projection', ProjectionLayer.Params(),
        'Projection layer params. 
A single parameter that will be shared by'\n 'all layers.')\n p.Define(\n 'dropout', DropoutLayer.Params(),\n 'Dropout layer params. Can be a single params or a tuple/list of params'\n ' having the same length as the number of layers.')\n p.Define(\n 'batch_norm', False,\n 'Whether or not to apply BN to hidden layer output. '\n 'This can be a single bool or a tuple/list of bools having the'\n ' same length as the number of layers.')\n p.Define(\n 'activation', 'RELU',\n 'The activation function to use. Can be a single string, or a'\n ' tuple/list of strings having the same length as the number'\n ' of layers.')\n p.Define(\n 'has_bias', None, 'Whether or not to use bias for projection layers.'\n 'This can be a None, single bool or a tuple/list of bools having the '\n 'same length as the number of layers. If None, the has_bias is set to '\n 'True whenever batch_norm is False for each projection layer.')\n p.Define(\n 'weight_norm', False,\n 'Whether or not to apply weight normalization to weights. This can be '\n 'a single bool or a tuple/list of bools having the same length as the '\n 'number of layers.')\n p.Define('skip_connections', None, 'Must be None.')\n p.Define(\n 'bn_fold_weights', None, 'Force folding the batch normalization '\n 'weights in the projection layer.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n assert symbolic.ToStatic(p.input_dim) > 0\n assert all(symbolic.ToStatic(x) > 0 for x in p.hidden_layer_dims)\n\n assert p.skip_connections is None\n batch_norm = p.batch_norm\n num_layers = len(p.hidden_layer_dims)\n if isinstance(batch_norm, (list, tuple)):\n assert len(batch_norm) == num_layers\n else:\n batch_norm = [batch_norm] * num_layers\n weight_norm = p.weight_norm\n if isinstance(weight_norm, (list, tuple)):\n assert len(weight_norm) == num_layers\n else:\n weight_norm = [weight_norm] * num_layers\n\n activation = p.activation\n if isinstance(activation, str):\n activation = [activation] * num_layers\n else:\n assert len(activation) == num_layers\n has_bias = p.has_bias\n if isinstance(has_bias, (list, tuple)):\n assert len(has_bias) == num_layers\n else:\n has_bias = [has_bias] * num_layers\n # Set has_bias to (not batch_norm) if None.\n for i in range(num_layers):\n if has_bias[i] is None:\n has_bias[i] = (not batch_norm[i])\n params_dropout_layers = p.dropout\n if isinstance(params_dropout_layers, (list, tuple)):\n assert len(params_dropout_layers) == num_layers\n else:\n params_dropout_layers = [params_dropout_layers] * num_layers\n\n # Residual connections work better in the form of:\n # y = x + Affine(Activation(BatchNorm(x)))\n params_fc_layers = []\n in_dim = p.input_dim\n for i in range(num_layers):\n out_dim = p.hidden_layer_dims[i]\n proj_out_dim = out_dim\n name = '%s_%d' % (p.name, i)\n params_i = p.projection.Copy().Set(\n batch_norm=batch_norm[i],\n weight_norm=weight_norm[i],\n has_bias=has_bias[i],\n activation=activation[i],\n input_dim=in_dim,\n output_dim=proj_out_dim,\n bn_fold_weights=p.bn_fold_weights,\n name=name)\n params_fc_layers.append(params_i)\n in_dim = out_dim\n\n if p.qdomain.default is not None:\n params_i.qdomain.default = p.qdomain.default.Copy()\n\n self.CreateChildren('fc', params_fc_layers)\n self.CreateChildren('dropout', params_dropout_layers)\n\n @property\n def output_dim(self):\n \"\"\"Returns output dimension of the FeedForwardNet.\"\"\"\n return self.params.hidden_layer_dims[-1]\n\n def FProp(self, theta, inputs, paddings=None):\n p = self.params\n num_layers 
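# --- Illustrative aside (not part of the layer code) -----------------------
# Back-of-envelope check of the FLOP count used by ProjectionLayer.FPropMeta
# above, with hypothetical shapes: a [B, in] x [in, out] matmul costs
# 2 * B * in * out FLOPs (a multiply and an add per MAC), plus activation
# work per output element and a one-off bias term.
def projection_flops(batch, in_dim, out_dim, bias=True, act_flops_per_elt=1):
  flops = 2 * batch * in_dim * out_dim           # matmul MACs
  flops += batch * out_dim * act_flops_per_elt   # activation
  if bias:
    flops += out_dim                             # mirrors flops += output_dim
  return flops

assert projection_flops(8, 256, 512) == 2 * 8 * 256 * 512 + 8 * 512 + 512
# ---------------------------------------------------------------------------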
= len(self.fc)\n\n in_dim, layer_in = p.input_dim, inputs\n for i in range(num_layers):\n layer_in = py_utils.with_dependencies([\n py_utils.assert_shape_match([tf.shape(layer_in)[-1]],\n [symbolic.ToStatic(in_dim)])\n ], layer_in)\n out_dim = p.hidden_layer_dims[i]\n layer_out = self.fc[i].FProp(theta.fc[i], layer_in, paddings)\n layer_out = self.dropout[i].FProp(theta.dropout[i], layer_out)\n layer_in = layer_out\n in_dim = out_dim\n return layer_in\n\n @classmethod\n def FPropMeta(cls, p, inputs, paddings=None):\n py_utils.CheckShapes((inputs,))\n assert inputs[-1] == p.input_dim\n flops = 0\n with tf.Graph().as_default(): # throw-away graph.\n instance = p.Instantiate()\n for fc in instance.fc:\n proj_params = fc.params\n proj_shape = tshape.Shape(inputs[:-1] + [proj_params.input_dim])\n proj_meta = proj_params.cls.FPropMeta(proj_params, proj_shape)\n flops += proj_meta.flops\n out_shape = tshape.Shape(inputs[:-1] + [p.hidden_layer_dims[-1]])\n return py_utils.NestedMap(flops=flops, out_shapes=(out_shape,))\n\n\nclass StackingOverTime(base_layer.BaseLayer):\n \"\"\"Stacking applied along the time axis.\n\n At each time step of an input sequence, elements are stacked over the\n window of ('left_context' + 1 + 'right_context') steps around the current\n time step. Zeros will be padded to the left or right of the sequence for\n elements around the boundaries. Finally the stacked outputs are emitted\n once every 'stride' steps.\n\n E.g. if an input sequence is: [4], [1], [9], [3], [5], [2], [8]\n left_context = 1, right_context = 1, stride = 3,\n then the output sequence would be: [0, 4, 1], [9, 3, 5], [2, 8, 0]\n\n Note that this layer only performs tensor transformation, so there are no\n learnable parameters.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('left_context', 0,\n 'Number of time steps to stack on the left to the central step.')\n p.Define('right_context', 0,\n 'Number of time steps to stack on the right to the central step.')\n p.Define('stride', 1, 'The stride for emitting the stacked output.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n assert p.left_context >= 0\n assert p.right_context >= 0\n assert p.stride >= 1\n\n @property\n def window_size(self):\n \"\"\"Returns the stacking window size.\n\n The output dimension will be window_size * the input dimension.\n\n Returns:\n Window size.\n \"\"\"\n p = self.params\n return p.left_context + p.right_context + 1\n\n def _ApplyStack(self, inputs, pad_value=0.0):\n \"\"\"The core function to apply the stacking to inputs.\n\n Args:\n inputs: [batch, time, depth].\n pad_value: the padding value for left/right context.\n\n Returns:\n [batch, ceil(time / stride), depth * stacking_window_length] tensor.\n \"\"\"\n p = self.params\n if p.left_context == 0 and p.right_context == 0:\n out = inputs\n else:\n inputs_max_len = py_utils.GetShape(inputs, 3)[1]\n # Add zero paddings to the left and right of the input sequence.\n inputs = tf.pad(\n inputs, [[0, 0], [p.left_context, p.right_context], [0, 0]],\n constant_values=pad_value)\n\n # Make window_size() copies of the padded sequence with the original\n # sequence length, where each copy is offset by 1 time step.\n pieces = []\n for i in range(self.window_size):\n pieces.append(inputs[:, i:i + inputs_max_len])\n # Apply stacking.\n out = tf.concat(pieces, 2)\n\n # Apply striding.\n out = out[:, ::p.stride]\n return out\n\n def FProp(self, inputs, paddings=None):\n \"\"\"Apply the stacking to 
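# --- Illustrative aside (not part of the layer code) -----------------------
# Standalone numpy rendering of the stacking example in the class docstring
# above: left_context=1, right_context=1, stride=3 over [4, 1, 9, 3, 5, 2, 8]
# yields [0, 4, 1], [9, 3, 5], [2, 8, 0].
import numpy as np

def stack_over_time(seq, left, right, stride, pad=0):
  padded = np.pad(seq, (left, right), constant_values=pad)
  window = left + right + 1
  frames = [padded[i:i + len(seq)] for i in range(window)]
  return np.stack(frames, axis=-1)[::stride]     # [ceil(time/stride), window]

out = stack_over_time(np.array([4, 1, 9, 3, 5, 2, 8]), 1, 1, 3)
assert out.tolist() == [[0, 4, 1], [9, 3, 5], [2, 8, 0]]
# ---------------------------------------------------------------------------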
inputs along the time axis.\n\n Args:\n inputs: The inputs tensor. It is expected to be of shape [batch, time,\n feature].\n paddings: The paddings tensor. It is expected to be of shape [batch, time,\n 1], where all but the last dimension match inputs. Each value is 0 or 1\n indicating whether a time step of a sequence is padded in the inputs to\n reach the max length in the batch.\n\n Returns:\n (outputs, out_paddings) pair.\n outputs is of shape [batch, ceil(time / stride), feature * stacking].\n out_paddings is of shape [batch, ceil(time / stride), 1]. out_paddings\n will be 0 if any of the corresponding input padding is 0.\n \"\"\"\n if paddings is None:\n paddings = tf.zeros(\n tf.concat([py_utils.GetShape(inputs)[:-1], [1]], 0),\n dtype=inputs.dtype)\n inputs = py_utils.with_dependencies(\n [\n # Checks the inputs shape has 3 dimensions.\n py_utils.assert_shape_match(tf.shape(inputs), [-1, -1, -1]),\n # Checks the paddings shape has 3 dimensions, and the last one is 1.\n py_utils.assert_shape_match(tf.shape(paddings), [-1, -1, 1]),\n # Checks the first two dimensions of inputs and paddings match.\n py_utils.assert_shape_match(\n tf.shape(inputs)[:-1],\n tf.shape(paddings)[:-1])\n ],\n inputs)\n p = self.params\n with tf.name_scope(p.name):\n outputs = self._ApplyStack(inputs)\n\n # Stack the padding values with the same context and stride parameters.\n # Then take the minimum padding values within each stacking window, since\n # an output time step becomes a padded one only if all of the underlying\n # stacked steps are padded ones.\n out_paddings = self._ApplyStack(paddings, pad_value=1)\n out_paddings = tf.reduce_min(out_paddings, axis=2, keepdims=True)\n\n return outputs, out_paddings\n\n def Unstack(self, stacked):\n \"\"\"Inverts stacking over time.\n\n Given 'stacked' outputs from this StackingOverTime layer,\n\n stacked, _ = this_layer.FProp(inputs),\n\n this method attempts to reconstruct the original 'inputs'.\n\n If stride > window_size, the original input cannot be recovered, and a\n ValueError is raised.\n\n Otherwise, if right_context + 1 >= stride, this method returns a Tensor that\n is identical to 'inputs' but potentially longer due to paddings.\n\n If right_context + 1 < stride, this method returns a Tensor that may be up\n to ```stride - right_context - 1``` frames shorter than the original input,\n but identical in the frames that are returned. e.g.::\n\n left_context = 2, right_context = 1, stride = 4\n input sequence: 1 2 3 4 5 6 7 8\n after padding: 0 0 1 2 3 4 5 6 7 8 0\n windows:\n [0 0 (1) 2] 3 4 5 6 7 8 0\n 0 0 1 2 [3 4 (5) 6] 7 8 0\n stacked:\n [[0 0 1 2], [3 4 5 6]]\n unstacked:\n [1 2 3 4 5 6], which is 4 - 1 - 1 = 2 (stride - right_context - 1)\n frames shorter than the original input.\n\n `Unstack()` can be used to project the outputs of downstream layers back to\n the shape of the original unstacked inputs. For example::\n\n inputs = ... # [batch, length, input_dim]\n # [batch, ceil(length / stride), rnn_dim]\n rnn_out = rnn.FProp(stacking.FProp(inputs)[0])\n # [batch, length, rnn_dim]\n back_projected_rnn_out = py_utils.PadOrTrimTo(\n stacking.Unstack(tf.tile(rnn_out, [1, 1, stacking.window_size])),\n py_utils.GetShape(inputs))\n\n Note this method does not take or return a separate padding tensor. The\n caller is responsible for knowing which of outputs are padding (e.g. 
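# --- Illustrative aside (not part of the layer code) -----------------------
# Pure-python walk-through of the Unstack index mapping implemented below,
# using the docstring example (left_context=2, right_context=1, stride=4):
# every original frame t maps to (window, frame) coordinates in 'stacked'.
left, right, stride = 2, 1, 4
stacked_len = 2                                   # two emitted windows

coords = []
for t in range((stacked_len - 1) * stride):       # all but the last window
  mod = t % stride
  nxt = 1 if mod > right else 0                   # looked up in next window
  coords.append((t // stride + nxt, left + mod - stride * nxt))
for k in range(right + 1):                        # last window, explicit
  coords.append((stacked_len - 1, left + k))

stacked = [[0, 0, 1, 2], [3, 4, 5, 6]]            # windows from the example
assert [stacked[w][f] for w, f in coords] == [1, 2, 3, 4, 5, 6]
# ---------------------------------------------------------------------------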
based\n on the padding of the original FProp inputs).\n\n Args:\n stacked: Tensor of shape [batch, time, window_size * feature_dim], assumed\n to be the output of `FProp`.\n\n Returns:\n The reconstructed input Tensor, with shape\n [batch, (frames - 1) * stride + right_context + 1, feature_dim].\n\n Raises:\n ValueError: if stride > window_size.\n \"\"\"\n p = self.params\n if p.stride > self.window_size:\n raise ValueError(\n \"Can't invert StackingOverTime with stride (%d) > window_size (%d)\" %\n (p.stride, self.window_size))\n\n # Reshape to allow indexing individual frames within each stacked window.\n batch_size, stacked_length, _ = py_utils.GetShape(stacked, 3)\n stacked = tf.reshape(stacked,\n [batch_size, stacked_length, self.window_size, -1])\n\n # Compute the index of the window and frame in 'stacked' where each frame of\n # the original input is located, and extract them with tf.gather_nd.\n # First compute for all except the last window, since these elements have\n # the potential of being looked up from the next window.\n input_indices = tf.range(0, (stacked_length - 1) * p.stride)\n mod = input_indices % p.stride\n in_next_window = tf.cast(tf.greater(mod, p.right_context), tf.int32)\n window_index = input_indices // p.stride + in_next_window\n frame_index = p.left_context + mod - p.stride * in_next_window\n # Now handle the last window explicitly and concatenate onto the existing\n # window_index/frame_index tensors.\n last_window_length = p.right_context + 1\n window_index = tf.concat(\n [window_index,\n tf.fill([last_window_length], stacked_length - 1)],\n axis=0)\n frame_index = tf.concat(\n [frame_index, p.left_context + tf.range(last_window_length)], axis=0)\n # Stack the indices for tf.gather_nd.\n window_and_frame_indices = tf.stack([window_index, frame_index], axis=1)\n window_and_frame_indices = tf.tile(\n tf.expand_dims(window_and_frame_indices, 0), [batch_size, 1, 1])\n return tf.gather_nd(stacked, window_and_frame_indices, batch_dims=1)\n\n\nclass PoolingLayer(quant_utils.QuantizableLayer):\n \"\"\"Pooling layer, by default performs max-pooling.\n\n Quantization notes: Unlike the common pattern, the pooling layer inputs\n and output must be quantized to the same range, so it tracks both (vs\n just the output). The preceding layer must have its output quantization\n disabled.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'window_shape', (0, 0),\n 'Window shape. Must be a pair of ints. Elements are in'\n ' the order of height (time), width (frequency).')\n p.Define(\n 'window_stride', (0, 0),\n 'Window stride to use. Must be a pair of ints. The first int'\n ' specifies the stride on the time dimension. The second int'\n ' specifies the stride on the frequency dimension.')\n p.Define('pooling_type', 'MAX', 'Pooling type: MAX|AVG')\n p.Define(\n 'padding_algorithm', 'SAME',\n 'Padding algorithm. See the \"returns\" section of '\n '`tf.nn.convolution` for details. 
'
        'Roughly, VALID = NO_PADDING and SAME (default) = PAD INPUT')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    assert len(p.window_shape) == 2
    assert len(p.window_stride) == 2
    assert all(x > 0 for x in p.window_shape)
    assert all(x > 0 for x in p.window_stride)
    assert p.pooling_type in ['MAX', 'AVG']

  def _CreateLayerVariables(self):
    super()._CreateLayerVariables()
    self.TrackQTensor('output')

  def OutShape(self, in_shape):
    """Compute the output shape given the input shape."""
    p = self.params
    return _ComputeConvOutputShape(
        in_shape,
        p.window_stride[0],
        p.window_stride[1],
        padding=p.padding_algorithm)

  def FProp(self, theta, inputs, paddings=None, *args):
    """Apply pooling to inputs.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor. It is expected to be of shape [batch, time,
        frequency, channel]. The time dimension corresponds to the height
        dimension as in images and the frequency dimension corresponds to the
        width dimension as in images.
      paddings: The paddings tensor. It is expected to be of shape [batch,
        time]. Defaults to None, which means there are no paddings.
      *args: Unused extra positional args, accepted for call-site
        compatibility.

    Returns:
      outputs, out_paddings pair.
    """
    p = self.params
    stride = p.window_stride
    window = p.window_shape
    if paddings is not None:
      inputs = py_utils.with_dependencies([
          py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]),
          py_utils.assert_shape_match(tf.shape(inputs)[:2], tf.shape(paddings))
      ], inputs)
    with tf.name_scope(p.name):
      if paddings is not None:
        out_padding = _ComputeConvOutputPadding(paddings, window[0], stride[0],
                                                p.padding_algorithm)
        if p.pooling_type == 'MAX':
          # Fill dtype.min in padded positions.
          min_value = tf.ones_like(inputs) * p.dtype.min
          inputs = py_utils.ApplyPadding(paddings[..., tf.newaxis, tf.newaxis],
                                         inputs, min_value)
      else:
        out_padding = None
      inputs = self.QTensor('output', inputs)

      out = tf.nn.pool(
          inputs,
          window,
          p.pooling_type,
          strides=stride,
          padding=p.padding_algorithm,
          data_format='NHWC',
      )
      if paddings is not None and p.pooling_type == 'AVG':
        # Count the fraction of non-padding elements inside each pooling
        # window.
        in_mask = 1.0 - paddings
        non_padding_ratio = tf.nn.pool(
            in_mask[:, :, tf.newaxis],
            window_shape=(p.window_shape[0],),
            pooling_type='AVG',
            strides=(p.window_stride[0],),
            padding=p.padding_algorithm)
        # Divide by non-padding ratios to eliminate the effect of padded
        # values.
        out *= tf.math.reciprocal_no_nan(non_padding_ratio)[..., tf.newaxis]

      out = self.QTensor('output', out)
      if out_padding is not None:
        out *= tf.expand_dims(tf.expand_dims(1.0 - out_padding, -1), -1)
      return out, out_padding

  @classmethod
  def FPropMeta(cls, p, inputs, *args):
    # Added so FLOP estimation works for pooling layers; the flops value is a
    # placeholder since pooling cost is negligible next to the conv/matmul
    # layers being counted.
    py_utils.CheckShapes((inputs,))
    shape = PoolingLayer(p).OutShape(inputs)
    return py_utils.NestedMap(flops=1, out_shapes=(tshape.Shape(shape),))


class BlurPoolLayer(base_layer.BaseLayer):
  """BlurPool from https://arxiv.org/pdf/1904.11486.pdf.

  This layer blurs the input with a fixed filter and performs subsampling
  afterwards. 
Only supports 2x1 or 2x2 spatial reduction.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('blur_filter', 'B5', 'One of [R2, T3, B5]; the fixed blur filter.')\n p.Define('subsample_type', '1D', 'Choose between [1D, 2D] subsampling.')\n p.Define('input_channels', None, 'Number of input channels.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n assert p.blur_filter in ['R2', 'T3', 'B5']\n assert p.subsample_type in ['1D', '2D']\n assert p.input_channels\n\n filter_dict = {\n 'B5': np.array([1, 4, 6, 4, 1], dtype=np.float32),\n 'T3': np.array([1, 2, 1], dtype=np.float32),\n 'R2': np.array([1, 1], dtype=np.float32)\n }\n base_filter = filter_dict[p.blur_filter]\n\n if p.subsample_type == '2D':\n base_filter = base_filter[:, np.newaxis] * base_filter[np.newaxis, :]\n else:\n base_filter = base_filter[:, np.newaxis]\n base_filter /= base_filter.sum()\n\n self._blur_filter = np.tile(base_filter[..., np.newaxis, np.newaxis],\n (1, 1, p.input_channels, 1))\n conv_params = DepthwiseConv2DLayer.Params().Set(\n activation='NONE',\n batch_norm=False,\n filter_stride=(1, 1),\n filter_shape=self._blur_filter.shape)\n\n self.CreateChild('blur_conv', conv_params)\n\n def FProp(self, theta, inputs, paddings=None):\n \"\"\"Apply blur pooling.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: The inputs tensor. It is expected to be of shape [batch, time,\n frequency, channel]. The time dimension corresponds to the height\n dimension as in images and the frequency dimension corresponds to the\n width dimension as in images.\n paddings: The paddings tensor. It is expected to be of shape [batch,\n time]. Defaults to None, which means there no paddings.\n\n Returns:\n outputs, out_paddings pair.\n \"\"\"\n p = self.params\n if paddings is not None:\n inputs = py_utils.with_dependencies([\n py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]),\n py_utils.assert_shape_match(tf.shape(inputs)[:2], tf.shape(paddings))\n ], inputs)\n # blur\n theta_cp = copy.copy(theta.blur_conv)\n theta_cp.w = tf.convert_to_tensor(self._blur_filter, dtype=p.dtype)\n out, out_padding = self.blur_conv.FProp(theta_cp, inputs, paddings)\n\n # b/142399320\n # Use stride in blur conv for subsampling once non-square stride gets\n # supported.\n if p.subsample_type == '2D':\n out = out[:, ::2, ::2, :]\n else:\n out = out[:, ::2, :, :]\n\n if out_padding is not None:\n out_padding = _ComputeConvOutputPadding(\n out_padding, window=2, stride=2, padding_algorithm='SAME')\n out *= (1.0 - out_padding)[..., tf.newaxis, tf.newaxis]\n else:\n out_padding = None\n\n return out, out_padding\n\n\nclass SingleShardEmbeddingLayer(base_layer.BaseLayer):\n \"\"\"Embedding layer that is not sharded.\n\n This embedding layer is expected to be replicated over all compute devices\n (e.g. tpu cores). It is intended to support small to medium embedding tables\n (< 50k) only.\n\n This is intended to be a unification of EmbeddingLayer and\n SimpleEmbeddingLayer (and cleanup of both). It is targeting the most common\n use-case we have in speech/nmt/tts/deeprank. Currently we often first\n configure a model using EmbeddingLayer, and then call ChangeToSimpleEmbedding\n to switch to SimpleEmbedding where we lose some configuration (e.g.\n scale_by_sqrt_dim).\n\n TODO(lingvo): Implement the matmul option which should be more efficient for\n small vocabs (e.g. 
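# --- Illustrative aside (not part of the layer code) -----------------------
# Numpy sketch of the fixed blur filter constructed above: the 1-D kernels
# are binomial rows ([1, 1], [1, 2, 1], [1, 4, 6, 4, 1]); for 2D subsampling
# the row is outer-multiplied with itself, and the result is normalized to
# sum to 1 so that blurring preserves overall scale.
import numpy as np

b5 = np.array([1, 4, 6, 4, 1], dtype=np.float32)
blur_2d = b5[:, np.newaxis] * b5[np.newaxis, :]
blur_2d /= blur_2d.sum()
assert blur_2d.shape == (5, 5) and np.isclose(blur_2d.sum(), 1.0)
# ---------------------------------------------------------------------------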
< 1k vocab).\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('vocab_size', 0, 'Num tokens in vocab.')\n p.Define('embedding_dim', 0, 'Depth of the output.')\n p.Define(\n 'scale_sqrt_depth', False, 'If set True, activations are scaled'\n ' with sqrt(embedding_dim) in EmbLookup.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.vocab_size > 0\n assert p.embedding_dim > 0\n assert p.name\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n p = self.params\n w_pc = py_utils.WeightParams(\n shape=[p.vocab_size, p.embedding_dim],\n init=p.params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n self.CreateVariable('emb_var', w_pc)\n\n def EmbLookupDefaultTheta(self, ids):\n return self.EmbLookup(self.theta, ids)\n\n def EmbLookup(self, theta, ids):\n \"\"\"Looks up embedding vectors for ids.\n\n Args:\n theta: Named tuple with the weight matrix for the embedding.\n ids: A rank-N int32 tensor.\n\n Returns:\n A rank-(N+1) params.dtype tensor.\n embs[indices, :] is the embedding vector for ids[indices].\n \"\"\"\n p = self.params\n ids = tf.convert_to_tensor(ids)\n ids = py_utils.with_dependencies([\n py_utils.assert_between(\n ids, 0, p.vocab_size, name='vocab_id_validation')\n ], ids)\n embs = tf.nn.embedding_lookup(theta.emb_var, tf.reshape(ids, [-1]))\n if p.scale_sqrt_depth:\n embs *= p.embedding_dim**0.5\n if p.vn.global_vn or p.vn.per_step_vn:\n embs = py_utils.AddGlobalVN(p, embs)\n out_shape = tf.concat([tf.shape(ids), [p.embedding_dim]], 0)\n return tf.reshape(embs, out_shape)\n\n def FProp(self, theta, ids):\n return self.EmbLookup(theta, ids)\n\n\nclass EmbeddingLayer(base_layer.BaseLayer):\n \"\"\"Embedding layer.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('vocab_size', 0, 'Depth of the input.')\n p.Define('embedding_dim', 0, 'Depth of the output.')\n p.Define('max_num_shards', 0, 'Num param shards.')\n p.Define('on_ps', True, 'True if to perform the embedding lookup on ps.')\n p.Define(\n 'scale_sqrt_depth', False, 'If set True, activations are scaled'\n ' with sqrt(embedding_dim) in EmbLookup.')\n return p\n\n # Min number of params per shard.\n MIN_PARAMS_PER_SHARD = 1024 * 256\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.vocab_size > 0\n assert p.embedding_dim > 0\n assert p.max_num_shards > 0\n assert p.name\n\n total_size = p.vocab_size * p.embedding_dim\n self._actual_shards = min(\n p.max_num_shards,\n int(math.ceil(float(total_size) / self.MIN_PARAMS_PER_SHARD)))\n self._ids_per_shard = int(\n math.ceil(float(p.vocab_size) / self._actual_shards))\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n p = self.params\n w_pc = py_utils.WeightParams(\n shape=[self._ids_per_shard, p.embedding_dim],\n init=p.params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n\n # EmbeddingLayer handles vars/theta differently from other layers\n # because when embedding shards are placed on ps, it's more\n # efficiently to do embedding lookups on ps and sends the result\n # back to the worker.\n emb_vars = []\n emb_shards = []\n for i in range(self._actual_shards):\n var_name = 'var_%d' % i\n self.CreateVariable(var_name, w_pc)\n emb_vars.append(self.vars[var_name])\n # NOTE: self.theta[var_name] has transformations such as variational noise\n # applied via theta_fn in self.CreateVariable. 
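# --- Illustrative aside (not part of the layer code) -----------------------
# Worked example, with hypothetical sizes, of the shard-count arithmetic in
# EmbeddingLayer.__init__ above: shards = min(max_num_shards,
# ceil(vocab * dim / MIN_PARAMS_PER_SHARD)), and ids are split evenly
# (rounding up) across those shards.
import math

vocab_size, embedding_dim, max_num_shards = 32000, 512, 16
min_params_per_shard = 1024 * 256

total = vocab_size * embedding_dim                # 16,384,000 params
shards = min(max_num_shards, math.ceil(total / min_params_per_shard))
ids_per_shard = math.ceil(vocab_size / shards)
assert (shards, ids_per_shard) == (16, 2000)
# ---------------------------------------------------------------------------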
For embedding layer we\n # apply variational noise explicitly in EmbLookup, so we do not use\n # self.theta[var_name] here.\n v = self.vars[var_name]\n if not p.on_ps:\n v = tf.identity(v)\n if p.fprop_dtype is not None and p.fprop_dtype != p.dtype:\n v = tf.cast(v, p.fprop_dtype)\n emb_shards.append(v)\n # Remove from _private_vars / _private_thetas to be added later as wm.\n del self._private_vars[var_name]\n del self._private_theta[var_name]\n self._private_vars['wm'] = emb_vars\n self._private_theta['wm'] = emb_shards\n\n def EmbLookupDefaultTheta(self, ids):\n return self.EmbLookup(self.theta, ids)\n\n def EmbLookup(self, theta, ids):\n \"\"\"Looks up embedding vectors for ids.\n\n Args:\n theta: Named tuple with the weight matrix for the embedding.\n ids: A rank-N int32 tensor.\n\n Returns:\n A rank-(N+1) params.dtype tensor.\n embs[indices, :] is the embedding vector for ids[indices].\n \"\"\"\n p = self.params\n ids = tf.convert_to_tensor(ids)\n ids = py_utils.with_dependencies([\n py_utils.assert_between(\n ids, 0, p.vocab_size, name='vocab_id_validation')\n ], ids)\n embs = tf.nn.embedding_lookup(theta.wm, tf.reshape(ids, [-1]))\n if p.scale_sqrt_depth:\n embs *= p.embedding_dim**0.5\n if p.vn.global_vn or p.vn.per_step_vn:\n embs = py_utils.AddGlobalVN(p, embs)\n out_shape = tf.concat([tf.shape(ids), [p.embedding_dim]], 0)\n return tf.reshape(embs, out_shape)\n\n\nclass SimpleEmbeddingLayer(quant_utils.QuantizableLayer):\n \"\"\"An embedding layer that is simple to compile (by XLA and Toco).\n\n The params use_matmul and use_gather control how the lookup is performed.\n If neither is True, then a loop is used to compute the embedding.\n\n This layer is \"simple\" in comparison to 'EmbeddingLayer' in that it does\n not shard the embeddings.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('vocab_size', 0,\n 'Depth of the input. I.e., the number of classes.')\n p.Define('embedding_dim', 0, 'Depth of the output.')\n p.Define(\n 'use_matmul', False, 'If True, use a matmul to implement '\n 'the embedding lookup. Depending on vocab_size and #ids, '\n 'e.g., when vocab_size is small, use_matmul can be more '\n 'efficient. On the other hand, use_matmul creates a 0/1 '\n 'sparse matrix and hence may use more memory than the '\n 'final output.')\n p.Define(\n 'fprop_mode', None, 'Sets the mode used for computing the fprop '\n '(different inference engines have different capabilities and this '\n 'accomodates them). Can be \"loop\", \"matmul\" or \"gather\". 
If None, '\n 'defaults to \"matmul\" if use_matmul or \"loop\" if false.')\n p.Define(\n 'use_3d_weight_tensor', False, 'If True, and use_matmul is False,'\n 'in TPU compatibility mode, we reshape the normal 2D weight'\n 'tensor to [num_rows, embed_dim] to be '\n '[num_rows, embed_dim // 128, 128].')\n p.Define('apply_pruning', False,\n 'Whether to prune the weights while training')\n p.Define(\n 'scale_sqrt_depth', False, 'If set True, activations are scaled'\n ' with sqrt(embedding_dim) in EmbLookup.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.vocab_size > 0\n assert symbolic.ToStatic(p.embedding_dim) > 0\n\n valid_fprop_modes = ['loop', 'matmul', 'gather']\n self._fprop_mode = p.fprop_mode\n if not self._fprop_mode:\n self._fprop_mode = 'matmul' if p.use_matmul else 'gather'\n assert self._fprop_mode in valid_fprop_modes, (\n 'fprop_mode must be one of %r' % valid_fprop_modes)\n\n def _FpropImpl(self, embs, ids_vec):\n \"\"\"The embedding lookup implementation.\"\"\"\n p = self.params\n emb_shape_suf, weight_shape = self._GetWeightShape()\n\n def EmbBprop(xs, ys, dys):\n \"\"\"Embedding backprop.\n\n Effectively, it computes:\n num = size of xs.ids_vec\n dembs = zeros_like(xs.embs)\n for i in range(num):\n dembs[xs.ids_vec[i], :] += dys[i, :]\n return dembs, zeros_like(xs.ids_vec)\n\n Args:\n xs: A NestedMap containing:\n - embs: The embedding matrix. Unused in the backprop.\n - ids_vec: A vector of int32 embedding ids.\n ys: Required by py_utils._DefineDefun, not used here.\n dys: A matrix of size (size of xs.ids_vec, embedding dims).\n\n Returns:\n A NestedMap containing:\n\n - embs: A matrix of the same shape of xs.embs. Gradients for xs.embs.\n - ids_vec: Zeros. Same shape as xs.ids_vec.\n \"\"\"\n del ys\n num = tf.shape(xs.ids_vec)[0]\n dembs = inplace_ops.empty(weight_shape, py_utils.FPropDtype(p), init=True)\n if len(weight_shape) != 2:\n dys_shape = tf.shape(dys)\n dys = tf.reshape(dys, [dys_shape[0]] + emb_shape_suf)\n\n def EmbBpropLoop(i, state):\n # row_id = state.ids_vec[i]\n row_id = tf.gather(state.ids_vec, i)\n # row = state.drets[i]\n row = tf.reshape(tf.gather(state.drets, i), [1] + emb_shape_suf)\n # state.dembs[row_id] = row\n state.dembs = inplace_ops.alias_inplace_add(state.dembs, [row_id], row)\n return state\n\n dembs = py_utils.ForLoop(\n body=EmbBpropLoop,\n start=0,\n limit=num,\n delta=1,\n loop_state=py_utils.NestedMap(\n ids_vec=xs.ids_vec, drets=dys, dembs=dembs)).dembs\n\n if p.scale_sqrt_depth:\n dembs *= p.embedding_dim**0.5\n\n return py_utils.NestedMap(embs=dembs, ids_vec=tf.zeros_like(ids_vec))\n\n def EmbFprop(xs):\n \"\"\"Embedding forward prop.\n\n Effectively, it computes:\n num = size of xs.ids_vec\n rets = zeros([num, embedding dim])\n for i in range(num):\n rets[i, :] = xs.embs[xs.ids_vec[i], :]\n return rets\n\n Args:\n xs: A NestedMap containing:\n - embs: The embedding matrix.\n - ids_vec: A vector of int32 embedding ids.\n\n Returns:\n The result of embedding lookups. 
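# --- Illustrative aside (not part of the layer code) -----------------------
# Numpy sanity check of the one-hot matmul lookup trick used by the
# EmbMatmul path defined just below: building a one-hot matrix by comparing
# ids against range(vocab) and multiplying it into the embedding table
# performs the same lookup a gather would.
import numpy as np

rng = np.random.default_rng(0)
vocab, dim = 10, 4
embs = rng.normal(size=(vocab, dim))
ids = np.array([3, 3, 7, 0])

one_hot = (ids[:, None] == np.arange(vocab)[None, :]).astype(embs.dtype)
assert np.allclose(one_hot @ embs, embs[ids])
# ---------------------------------------------------------------------------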
A matrix of shape\n [num ids in xs.ids_vec, embedding dims].\n \"\"\"\n num = tf.shape(xs.ids_vec)[0]\n rets = inplace_ops.empty([num] + emb_shape_suf, py_utils.FPropDtype(p))\n\n def EmbFpropLoop(i, state):\n # row_id = state.ids_vec[i]\n row_id = tf.gather(state.ids_vec, i)\n # row = state.embs[row_id]\n row = tf.reshape(tf.gather(state.embs, row_id), [1] + emb_shape_suf)\n # state.rets[i] = row\n state.rets = inplace_ops.alias_inplace_update(state.rets, [i], row)\n return state\n\n rets = py_utils.ForLoop(\n body=EmbFpropLoop,\n start=0,\n limit=num,\n delta=1,\n loop_state=py_utils.NestedMap(\n embs=xs.embs, ids_vec=xs.ids_vec, rets=rets)).rets\n if len(weight_shape) > 2:\n rets = tf.reshape(rets, [num, symbolic.ToStatic(p.embedding_dim)])\n return rets\n\n def EmbMatmul(xs):\n \"\"\"Lookups embedding vectors by doing Matmul with one-hot vector.\"\"\"\n # lhs[i, j] is True iff xs.ids_vec[i] == j.\n lhs = tf.equal(\n tf.expand_dims(xs.ids_vec, 1),\n tf.range(p.vocab_size, dtype=xs.ids_vec.dtype))\n return tf.matmul(tf.cast(lhs, xs.embs.dtype), xs.embs)\n\n def EmbGather(xs):\n \"\"\"Lookups embedding vectors.\"\"\"\n # If tf.gather is used, the gradient for the wm will be represented as\n # IndexedSlices which is sparse. tf.tpu.cross_replica_sum turns\n # IndexedSlices into a dense tensor with undefined first dimension.\n # This may cause issues on TPU so instead we just wrap this with\n # tf.identity which allows tf.tpu.cross_replica_sum to properly compute\n # the first dim.\n return tf.nn.embedding_lookup(tf.identity(xs.embs), xs.ids_vec)\n\n xs = py_utils.NestedMap(embs=embs, ids_vec=ids_vec)\n if self._fprop_mode == 'matmul':\n return py_utils.CallDefun(EmbMatmul, xs)\n elif self._fprop_mode == 'loop':\n return py_utils.CallDefun(\n EmbFprop, xs, bak=EmbBprop, bak_as_function=True)\n elif self._fprop_mode == 'gather':\n return EmbGather(xs)\n\n def _GetWeightShape(self):\n p = self.params\n if py_utils.tpu_compat() and self._fprop_mode != 'matmul':\n if p.use_3d_weight_tensor:\n assert symbolic.ToStatic(p.embedding_dim) % 128 == 0\n emb_shape_suf = [symbolic.ToStatic(p.embedding_dim) // 128, 128]\n else:\n emb_shape_suf = [symbolic.ToStatic(p.embedding_dim)]\n else:\n emb_shape_suf = [symbolic.ToStatic(p.embedding_dim)]\n weight_shape = [p.vocab_size] + emb_shape_suf\n return emb_shape_suf, weight_shape\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n p = self.params\n _, weight_shape = self._GetWeightShape()\n\n # Define weights\n pc = py_utils.WeightParams(\n shape=weight_shape,\n init=p.params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n\n if p.apply_pruning:\n mask_pc = py_utils.WeightParams(pc.shape,\n py_utils.WeightInit.Constant(1.0),\n p.dtype)\n threshold_pc = py_utils.WeightParams([],\n py_utils.WeightInit.Constant(0.0),\n tf.float32)\n self.CreateVariable('mask', mask_pc, theta_fn=None, trainable=False)\n self.CreateVariable(\n 'threshold', threshold_pc, theta_fn=None, trainable=False)\n\n def MaskWeightFn(weight):\n return tf.multiply(\n self.AddGlobalVN(weight), self.vars.mask, 'masked_weights')\n\n self.CreateVariable('wm', pc, theta_fn=MaskWeightFn)\n pruning_utils.AddToPruningCollections(self.vars.wm, self.vars.mask,\n self.vars.threshold)\n else:\n self.CreateVariable('wm', pc, theta_fn=None)\n\n def EmbLookupDefaultTheta(self, ids):\n \"\"\"Lookups embedding vectors for ids.\"\"\"\n return self.FProp(self.theta, ids)\n\n def EmbLookup(self, theta, ids):\n return self.FProp(theta, ids)\n\n def 
EmbLookupDefaultThetaOnCpu(self, ids):\n \"\"\"A faster path for CPU inference than the default gather.\"\"\"\n p = self.params\n embs = tf.nn.embedding_lookup(self.theta.wm, tf.reshape(ids, [-1]))\n out_shape = tf.concat([tf.shape(ids), [symbolic.ToStatic(p.embedding_dim)]],\n 0)\n if p.scale_sqrt_depth:\n embs *= p.embedding_dim**0.5\n return tf.reshape(embs, out_shape)\n\n def _FlatFProp(self, theta, ids):\n \"\"\"Lookups embedding vectors for ids.\n\n Args:\n theta: Named tuple collection of weights for the layer.\n ids: A rank-N int32 tensor.\n\n Returns:\n A tuple of the flattened inputs to the embedding lookup, and a tensor that\n is ready to be reshaped into the final shape in FProp.\n \"\"\"\n if not isinstance(ids, tf.Tensor):\n tf.logging.warning('ids should be a tf.Tensor!')\n ids = tf.convert_to_tensor(ids, tf.int32)\n elif ids.dtype != tf.int32:\n tf.logging.warning('ids should be tf.int32, but is %s!', ids.dtype)\n ids = tf.cast(ids, tf.int32)\n p = self.params\n ids = py_utils.with_dependencies([\n py_utils.assert_between(\n ids, 0, p.vocab_size, name='vocab_id_validation')\n ], ids)\n flat_ids = tf.reshape(ids, [-1])\n wm = self.QWeight(theta.wm)\n wm = self.ToAqtWeight(wm, feature_axis=-1)\n embs_result = self._FpropImpl(wm, flat_ids)\n\n embs_result = self.FromAqtWeight(embs_result)\n\n if p.vn.global_vn or p.vn.per_step_vn:\n p.vn.seed = p.random_seed\n embs_result = py_utils.AddGlobalVN(p, embs_result)\n\n if p.scale_sqrt_depth:\n embs_result *= p.embedding_dim**0.5\n return flat_ids, embs_result\n\n def FProp(self, theta, ids):\n \"\"\"Lookups embedding vectors for ids.\n\n Args:\n theta: Named tuple collection of weights for the layer.\n ids: A rank-N int32 tensor.\n\n Returns:\n A rank-(N+1) params.dtype tensor.\n embs[indices, :] is the embedding vector for ids[indices].\n \"\"\"\n _, embs_result = self._FlatFProp(theta, ids)\n out_shape = tf.concat(\n [tf.shape(ids), [symbolic.ToStatic(self.params.embedding_dim)]], 0)\n return tf.reshape(embs_result, out_shape)\n\n\nclass OneHotEmbeddingLayer(base_layer.BaseLayer):\n \"\"\"Generates one-hot embeddings with uncertainties.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('vocab_size', 0,\n 'Depth of the input. 
I.e., the number of classes.')\n p.Define('embedding_dim', 0, 'Depth of the output.')\n p.Define('uncertainty', 0.0, 'Uncertainty of the correct ID.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n assert p.vocab_size > 1\n assert p.embedding_dim == p.vocab_size\n\n def EmbLookupDefaultTheta(self, ids):\n \"\"\"Lookups embedding vectors for ids.\"\"\"\n return self.FProp(self.theta, ids)\n\n def EmbLookup(self, theta, ids):\n return self.FProp(theta, ids)\n\n def FProp(self, theta, ids):\n \"\"\"Lookups embedding vectors for ids.\n\n Args:\n theta: Named tuple collection of weights for the layer.\n ids: A rank-N int32 tensor.\n\n Returns:\n A rank-(N+1) params.dtype tensor.\n embs[indices, :] is the embedding vector for ids[indices].\n \"\"\"\n del theta\n p = self.params\n ids = py_utils.with_dependencies([\n py_utils.assert_between(\n ids, 0, p.vocab_size, name='vocab_id_validation')\n ], ids)\n low_confidence = p.uncertainty / tf.cast(p.vocab_size - 1, tf.float32)\n high_confidence = 1.0 - p.uncertainty\n embs_result = tf.one_hot(\n ids,\n depth=p.vocab_size,\n on_value=high_confidence,\n off_value=low_confidence)\n if p.fprop_dtype is not None:\n embs_result = tf.cast(embs_result, p.fprop_dtype)\n return embs_result\n\n\nclass PositionalEmbeddingLayer(base_layer.BaseLayer):\n \"\"\"Generates sinusoidals with respect to the position in time and dimension.\n\n Implements the positional embedding layer from 'Attention is All You Need',\n the Transformer Network.\n\n Code and comments are adapted from tensor2tensor/layers/common_attention.py\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'min_timescale', 1, 'Start of the geometric index.'\n 'Determines the periodicity of the added signal.')\n p.Define(\n 'max_timescale', 10000, 'End of the geometric index. '\n 'Determines the frequency of the added signal.')\n p.Define('embedding_dim', 0, 'Dimension of the embedding to be generated.')\n p.Define(\n 'trainable_scaling', False,\n 'Introduces a trainable scaling parameter (a scalar) that'\n ' multiplies the positional embedding in FProp.')\n p.Define('trainable_scaling_init', 1.0,\n 'Initial value of the scaling parameter.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n assert p.min_timescale\n assert p.max_timescale\n assert p.embedding_dim % 2 == 0\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n p = self.params\n if p.trainable_scaling:\n pc = py_utils.WeightParams(\n shape=[1],\n init=py_utils.WeightInit.Constant(0.0),\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n self.CreateVariable('scale', pc)\n\n def _PosEmbeddingsFromPositions(self, theta, position):\n \"\"\"Generates the positional embeddings given the position tensor.\n\n Factors out the common code from FProp and FPropWithPosition. 
Returns\n positional embeddings corresponding to the input position tensor.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n position: Position tensor of dtype float and shape [bs, seq_length] to\n generate positional embeddings.\n\n Returns:\n a Tensor of shape [bs, seq_length, embedding_dim].\n \"\"\"\n p = self.params\n seq_length = tf.shape(position)[1]\n num_timescales = p.embedding_dim // 2\n log_timescale_increment = (\n math.log(float(p.max_timescale) / float(p.min_timescale)) / tf.maximum(\n tf.cast(1.0, py_utils.FPropDtype(p)),\n tf.cast(num_timescales, py_utils.FPropDtype(p)) - 1))\n\n inv_timescales = p.min_timescale * tf.exp(\n tf.cast(tf.range(num_timescales), py_utils.FPropDtype(p)) *\n -log_timescale_increment)\n\n scaled_time = tf.expand_dims(position, 2) * tf.reshape(\n inv_timescales, [1, 1, -1])\n\n signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)\n signal = tf.pad(\n signal, [[0, 0], [0, 0], [0, tf.math.floormod(p.embedding_dim, -1)]])\n signal = tf.reshape(signal, [-1, seq_length, p.embedding_dim])\n if p.trainable_scaling:\n signal *= (p.trainable_scaling_init + theta.scale)\n return signal\n\n def FProp(self, theta, seq_length):\n \"\"\"Generates a Tensor of sinusoids with different frequencies.\n\n Each channel (dimension) of the generated positional embedding Tensor\n corresponds to a sinusoid of different frequency and phase.\n\n This allows attention to learn to use absolute and relative positions.\n Timing signals should be added to some precursors of both the query and the\n memory inputs to attention.\n\n The use of relative position is possible because sin(x+y) and cos(x+y) can\n be expressed in terms of y, sin(x) and cos(x).\n\n In particular, we use a geometric sequence of timescales starting with\n min_timescale and ending with max_timescale. The number of different\n timescales is equal to channels (dimension) / 2. For each timescale, we\n generate the two sinusoidal signals sin(timestep/timescale) and\n cos(timestep/timescale). All of these sinusoids are concatenated in\n the channels dimension.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n seq_length: Sequence length of the embeddings to be generated\n\n Returns:\n a Tensor of shape [seq_length, embedding_dim].\n \"\"\"\n p = self.params\n position = tf.reshape(\n tf.cast(tf.range(seq_length), py_utils.FPropDtype(p)), [1, seq_length])\n pos_emb = self._PosEmbeddingsFromPositions(theta, position)\n return tf.reshape(pos_emb, [seq_length, -1])\n\n def FPropWithPosition(self, theta, position_tensor):\n \"\"\"Generates a Tensor of sinusoids with different frequencies.\n\n Uses the provided position tensor to generate positional embeddings. Refer\n to FProp description for details of sinusoidal positional embeddings.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n position_tensor: Position tensor of shape [bs, seq_length] to generate\n positional embeddings.\n\n Returns:\n a Tensor of shape [bs, seq_length, embedding_dim].\n \"\"\"\n position = tf.cast(position_tensor, py_utils.FPropDtype(self.params))\n return self._PosEmbeddingsFromPositions(theta, position)\n\n\nclass RelativePositionalEmbeddingLayer(base_layer.BaseLayer):\n \"\"\"Relative positional embedding.\n\n Section 3.2 of https://arxiv.org/pdf/1803.02155.pdf\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'radius', None,\n 'Radius of the relative window size. Distances are clipped to '\n '[-radius, radius].')\n p.Define('dim', None, 'Dimension of embedding.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n params = self.params\n if not isinstance(params.radius, numbers.Integral) or params.radius <= 0:\n raise ValueError('params.radius must be a positive int, but is %s' %\n params.radius)\n if not isinstance(params.dim, numbers.Integral) or params.dim <= 0:\n raise ValueError('params.dim must be a positive int, but is %s' %\n params.dim)\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n pc = py_utils.WeightParams(\n shape=[2 * self.params.radius + 1, self.params.dim],\n init=py_utils.WeightInit.Constant(0.0),\n dtype=self.params.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n self.CreateVariable('w', pc)\n\n def FProp(self, theta, relative_distance):\n \"\"\"Computes relative positional embedding.\n\n Args:\n theta: A NestedMap of Tensors of layer weights.\n relative_distance: A Tensor.\n\n Returns:\n A Tensor of shape relative_distance.shape + [params.dim]\n \"\"\"\n params = self.params\n clipped_indices = tf.clip_by_value(relative_distance, -params.radius,\n params.radius)\n # Right-shift indices to make them all non-negative.\n calibrated_indices = clipped_indices + params.radius\n return tf.gather_nd(theta.w, tf.expand_dims(calibrated_indices, -1))\n\n\nclass SinusoidalPositionalEmbeddingLayer(base_layer.BaseLayer):\n \"\"\"Generates sinusoidals with respect to the position in time and dimension.\n\n Implements a variant of the positional embedding layer from 'Attention is\n All You Need', the Transformer Network, that doesn't require tuning of the\n max_timescale/min_timescale. 
See this blog post and Ron's colab.\n https://kazemnejad.com/blog/transformer_architecture_positional_encoding\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('embedding_dim', 0, 'Dimension of the embedding to be generated.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if p.embedding_dim % 2 != 0:\n raise ValueError('embedding_dim needs to be even.')\n\n def FProp(self, theta, seq_length):\n \"\"\"Generates a Tensor of sinusoids with different frequencies.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n seq_length: Sequence length of the embeddings to be generated\n\n Returns:\n a Tensor of shape [seq_length, embedding_dim].\n \"\"\"\n p = self.params\n positions = tf.cast(tf.range(seq_length), py_utils.FPropDtype(p))\n num_timescales = p.embedding_dim // 2\n freq = tf.range(\n 1, num_timescales + 1,\n dtype=py_utils.FPropDtype(p)) * (2 * math.pi / seq_length)\n scaled_pos = tf.matmul(positions[:, tf.newaxis], freq[tf.newaxis, :])\n sincos = tf.concat([tf.sin(scaled_pos), tf.cos(scaled_pos)], axis=-1)\n return tf.reshape(sincos, [seq_length, -1])\n\n\nclass SoftmaxLayer(quant_utils.QuantizableLayer):\n \"\"\"Base class for softmax layers.\"\"\"\n\n @classmethod\n def Params(cls):\n \"\"\"Params for SoftmaxLayer.\"\"\"\n p = super().Params()\n p.Define('input_dim', 0, 'Dimension of the input.')\n p.Define('num_classes', 0, 'Total number of target classes.')\n p.Define(\n 'logits_abs_max', None, 'If not None, logits are clipped to be within'\n ' [-logits_abs_max, logits_abs_max]. This can be a scalar'\n ' or a scalar tensor. Applies back pressure at training time; ignored'\n ' for inference.')\n p.Define(\n 'chunk_size', 0, 'If non-zero, computes the per example '\n 'xent by small chunks along the batch dimension.')\n\n p.qdomain.Define('logits', None, 'Quantization domain for logits.')\n return p\n\n def Logits(self, **unused):\n \"\"\"Returns the logits computed before the softmax.\"\"\"\n raise NotImplementedError('Logits is not implemented.')\n\n def XentLossFromLogits(self, **unused):\n \"\"\"Returns the Xent loss from pre-computed logits.\"\"\"\n raise NotImplementedError('XentLossFromLogits is not implemented.')\n\n def XentLoss(self, *args, **kwargs):\n \"\"\"Computes cross entropy.\"\"\"\n return self.FProp(self.theta, *args, **kwargs)\n\n def _FProp2D(self,\n theta,\n inputs,\n class_weights,\n class_ids=None,\n class_probabilities=None):\n \"\"\"Specialized FProp for matrix inputs.\"\"\"\n raise NotImplementedError(\n 'Subclasses of SoftmaxLayer must implement _FProp2D')\n\n def FProp(self,\n theta,\n inputs,\n paddings=None,  # Expected to be None; accepted for API compatibility.\n class_weights=None,\n class_ids=None,\n class_probabilities=None):\n \"\"\"Computes logit, cross entropy etc.\n\n This function can work with either class_ids or probability distributions\n over classes. Exactly one of class_ids or class_probabilities must be\n provided.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: a list of a single tensor, or a single tensor with the shape [...,\n input_dim].\n class_weights: a tensor with shape [...] 
containing the weights for each\n target word.\n class_ids: a tensor with shape [..., 1] of int32 dtype containing the\n target class labels.\n class_probabilities: a tensor with shape [..., num_classes] of float\n values indicating class-membership probabilities.\n\n Returns:\n A `.NestedMap` containing the following fields\n\n - logits: with shape [..., num_classes]. Unnormalized softmax's logits.\n - per_example_argmax: with shape [...]. argmax of i-th example.\n - per_example_xent: with shape [...]. Cross entropy between i-th example's\n prediction and its label.\n - per_example_weight: with shape [...]. class_weights casted to\n this layer's dtype.\n - total_xent: A scalar. The sum of per_example_weight * per_example_xent.\n - total_weight: A scalar. The sum of per_example_weight.\n - avg_xent: A scalar. total_loss / total_weight.\n \"\"\"\n p = self.params\n\n # Consolidate list/single value into a list.\n if not isinstance(inputs, list):\n inputs = [inputs]\n\n # If inputs are matrices already, delegate to _FProp2D.\n if inputs[0].shape.ndims == 2:\n return self._FProp2D(theta, inputs, class_weights, class_ids,\n class_probabilities)\n\n # Remembers the original shape[1:-1].\n shape_mid = tf.shape(inputs[0])[1:-1]\n\n # Reshape inputs to matrices, labels to vectors, etc.\n inputs = [\n tf.reshape(x, py_utils.ToStaticShape([-1, p.input_dim])) for x in inputs\n ]\n class_weights = tf.reshape(class_weights, [-1])\n if class_ids is not None:\n class_ids = tf.reshape(class_ids, [-1, 1])\n if class_probabilities is not None:\n class_probabilities = tf.reshape(class_probabilities, [-1, p.num_classes])\n\n # Delegates to _FProp2D.\n xent_loss = self._FProp2D(theta, inputs, class_weights, class_ids,\n class_probabilities)\n\n # Reshapes xent_loss fields according to the inputs' shape.\n xent_loss.logits = tf.reshape(\n xent_loss.logits, tf.concat([[-1], shape_mid, [p.num_classes]], axis=0))\n\n per_example_shape = tf.concat([[-1], shape_mid], axis=0)\n xent_loss.per_example_argmax = tf.reshape(xent_loss.per_example_argmax,\n per_example_shape)\n\n xent_loss.per_example_xent = tf.reshape(xent_loss.per_example_xent,\n per_example_shape)\n\n xent_loss.per_example_weight = tf.reshape(xent_loss.per_example_weight,\n per_example_shape)\n return xent_loss\n\n\nclass SimpleFullSoftmax(SoftmaxLayer):\n \"\"\"A somewhat simple softmax layer.\"\"\"\n\n @classmethod\n def Params(cls):\n \"\"\"Params for SimpleFullSoftmax.\"\"\"\n p = super().Params()\n p.Define(\n 'num_sampled', 0, 'Number of samples to use for the sampled soft-max. '\n 'Default value of 0 means no sampling is done; if set to > 0 then '\n 'training will use sampled soft-max when both chunk_size == 0 and '\n 'FProp is called with class_probabilities=None.')\n p.Define(\n 'num_shards', 1,\n 'Number of shards to split params into. num_shards should'\n ' divide num_classes.')\n p.Define('apply_pruning', False,\n 'Whether to prune the weights while training')\n p.Define(\n 'use_num_classes_major_weight', False,\n 'Whether to use num_classes as major dimension for weight params. '\n 'This shows performance benefit especially when sharing embedding '\n 'and softmax. 
By removing the transpose before gather, it allows '\n 'better XLA fusions and optimizations.')\n\n p.Define(\n 'use_bias', True, 'Whether or not to use a bias variable.'\n 'Not using bias is not compatible with sampled softmax '\n '(num_sampled > 0).')\n p.Define('class_weights',None,'The default class weights')\n p.Define('class_ids',None,'The default class labels')\n p.Define('log_probs',None,'')\n p.Define('per_example_argmax',None,'')\n p.Define('per_example_xent',None,'')\n p.Define('per_example_weight',None,'')\n p.Define('total_xent',None,'')\n p.Define('total_weight',None,'')\n p.Define('avg_xent',None,'')\n\n return p\n\n def __init__(self, params):\n \"\"\"Constructs a SimpleFullSoftmax layer.\"\"\"\n super().__init__(params)\n p = self.params\n assert p.name\n # We shard params across the class dimension.\n assert p.num_classes % p.num_shards == 0\n if not p.use_bias:\n assert p.num_sampled == 0, 'Sampled softmax requires bias.'\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n p = self.params\n\n num_classes_per_shard = p.num_classes // p.num_shards\n # When using sampled soft-max we'd rather work with weights of\n # shape=[num_classes_per_shard, p.input_dim] to avoid an expensive transpose\n # op before computing the sampled_softmax_loss.\n self._transpose_weight_params = False\n weights_shard_shape = [p.input_dim, num_classes_per_shard]\n if p.num_sampled or p.use_num_classes_major_weight:\n self._transpose_weight_params = True\n weights_shard_shape = [num_classes_per_shard, p.input_dim]\n\n pc = py_utils.WeightParams(\n shape=weights_shard_shape,\n init=p.params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n\n if p.apply_pruning:\n mask_pc = py_utils.WeightParams(pc.shape,\n py_utils.WeightInit.Constant(1.0),\n p.dtype)\n threshold_pc = py_utils.WeightParams([],\n py_utils.WeightInit.Constant(0.0),\n tf.float32)\n\n for i in range(p.num_shards):\n weights_var_name = 'weight_%d' % i\n if p.apply_pruning:\n mask_var_name = 'mask_%d' % i\n threshold_var_name = 'threshold_%d' % i\n self.CreateVariable(\n mask_var_name, mask_pc, theta_fn=None, trainable=False)\n self.CreateVariable(\n threshold_var_name, threshold_pc, theta_fn=None, trainable=False)\n\n def MaskWeightFn(weight):\n return tf.multiply(\n self.AddGlobalVN(weight), getattr(self.vars, mask_var_name),\n 'masked_weights')\n\n self.CreateVariable(weights_var_name, pc, theta_fn=MaskWeightFn)\n pruning_utils.AddToPruningCollections(\n getattr(self.vars, weights_var_name),\n getattr(self.vars, mask_var_name),\n getattr(self.vars, threshold_var_name))\n\n else:\n self.CreateVariable(weights_var_name, pc, self.AddGlobalVN)\n\n pc = py_utils.WeightParams(\n shape=[num_classes_per_shard],\n init=py_utils.WeightInit.Constant(0.0),\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n if p.use_bias:\n for i in range(p.num_shards):\n self.CreateVariable('bias_%d' % i, pc, self.AddGlobalVN)\n\n self.TrackQTensor('inputs')\n self.TrackQTensor('logits', domain='logits')\n\n def _GetInputs(self, inputs):\n if isinstance(inputs, list):\n assert len(inputs) == 1\n return inputs[0]\n return inputs\n\n def _ConcatWeights(self, theta):\n p = self.params\n # Add per-step noise if configured so.\n concat_axis = 1\n if self._transpose_weight_params:\n concat_axis = 0\n weights = [\n self.QWeight(theta['weight_%d' % i]) for i in range(p.num_shards)\n ]\n new_theta = theta.copy()\n if p.use_bias:\n biases = [self.QWeight(theta['bias_%d' % i]) for i in range(p.num_shards)]\n 
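# (Added note, not in original source) The per-shard bias vectors gathered\n # above are concatenated along axis 0, and py_utils.AddPerStepVN injects\n # per-step variational noise when configured; the weight shards below are\n # likewise concatenated along the class dimension (concat_axis).\n 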
new_theta.bias = py_utils.AddPerStepVN(p, tf.concat(biases, axis=0))\n new_theta.wm = py_utils.AddPerStepVN(p,\n tf.concat(weights, axis=concat_axis))\n return new_theta\n\n def _LogitsUsingConcatenatedWeightsHelper(self, theta, inputs):\n p = self.params\n inputs = self.QTensor('inputs', inputs)\n # TODO(shivaniagrawal): rescaling might be expensive for softmax; move\n # rescaling after Matmul.\n wm = self.AqtWeight(theta.wm, feature_axis=-1)\n wm = self.QWeight(wm)\n\n if p.use_bias:\n bias = self.QWeight(theta.bias)\n\n # x * w + b\n # Note that theta.wm and theta.bias are transformed to concated/clipped\n # by caller.\n logits = tf.nn.bias_add(\n py_utils.Matmul(\n inputs, wm, transpose_b=self._transpose_weight_params), bias)\n else:\n logits = py_utils.Matmul(\n inputs, wm, transpose_b=self._transpose_weight_params)\n\n # Clip logits by range.\n # Note that this is generally not used in conjunction with quantization and\n # shouldn't be needed at inference time as the quantized matmul above will\n # take care of clipping naturally based on the data type and qparams.\n abs_max = p.logits_abs_max\n if abs_max is not None and not p.is_inference:\n abs_min = -abs_max # pylint: disable=invalid-unary-operand-type\n logits = py_utils.clip_by_value(logits, abs_min, abs_max)\n return logits\n\n def _LogitsUsingConcatenatedWeights(self, theta, inputs):\n logits = self._LogitsUsingConcatenatedWeightsHelper(theta, inputs)\n return self.QTensor('logits', logits)\n\n def SimpleLogits(self, theta, inputs):\n \"\"\"Returns the simple logits computed before the softmax.\n\n Compared to the Logits function, this one has only weights, no bias for the\n linear projection.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: A tensor with the shape [N, input_dim].\n\n Returns:\n logits: [N, num_classes]\n \"\"\"\n inputs = self.QTensor('inputs', inputs)\n theta = self._ConcatWeights(theta)\n wm = self.QWeight(theta.wm)\n logits = py_utils.Matmul(\n inputs, wm, transpose_b=self._transpose_weight_params)\n\n return self.QTensor('logits', logits)\n\n def Logits(self, theta, inputs):\n \"\"\"Returns the logits computed before the softmax.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: a list of a single tensor, or a single tensor with the shape [N,\n input_dim].\n\n Returns:\n logits [batch, num_classes]\n \"\"\"\n return self._LogitsUsingConcatenatedWeights(\n self._ConcatWeights(theta), self._GetInputs(inputs))\n\n def _XentLossByChunk(self, theta, activation, class_ids):\n \"\"\"Computes per-example xent loss between activation and class_ids.\"\"\"\n p = self.params\n\n # We reshape activation from a matrix to a 3-D tensor (a sequence\n # of matrices), where the 2nd dimenion is p.chunk_size. 
Because\n # the batch dimenion may not be multiple of p.chunk_size, we pad\n # zeros.\n activation = py_utils.HasRank(activation, 2)\n batch, input_dim = tf.unstack(tf.shape(activation))\n dim0, dim1 = (batch + p.chunk_size - 1) // p.chunk_size, p.chunk_size\n pad = dim0 * dim1 - batch\n padded_activation = tf.concat(\n [activation,\n tf.zeros([pad, input_dim], dtype=activation.dtype)],\n axis=0)\n class_ids = py_utils.HasShape(class_ids, [batch, 1])\n padded_class_ids = tf.concat(\n [class_ids, tf.zeros([pad, 1], dtype=class_ids.dtype)], axis=0)\n\n if py_utils.use_tpu():\n id_dtype = tf.int32\n else:\n id_dtype = tf.int64\n padded_class_ids = tf.cast(padded_class_ids, id_dtype)\n\n # For each chunk, we compute logits of padded_activation[i, :, :],\n # and its xent loss with padded_class_ids[i, :].\n def ChunkFn(theta, state0, inputs):\n del state0\n activation, class_ids = inputs.activation, inputs.class_ids\n logits = self._LogitsUsingConcatenatedWeights(theta, activation)\n xent = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=class_ids)\n amax = tf.stop_gradient(py_utils.ArgMax(logits))\n return py_utils.NestedMap(xent=xent, amax=amax), py_utils.NestedMap()\n\n acc, _ = recurrent.Recurrent(\n theta=self._ConcatWeights(theta),\n state0=py_utils.NestedMap(\n xent=tf.zeros([p.chunk_size], dtype=p.dtype),\n amax=tf.zeros([p.chunk_size], dtype=id_dtype)),\n inputs=py_utils.NestedMap(\n activation=tf.reshape(padded_activation, [dim0, dim1, input_dim]),\n class_ids=tf.reshape(padded_class_ids, [dim0, dim1])),\n cell_fn=ChunkFn)\n\n # acc.xent has the shape [dim0, dim1]. acc.xent[i, :] are\n # per-example xent loss for examples in the i-th chunk. We\n # reshape acc.xent to a vector and slice the first 'batch' values.\n def GetBatch(x):\n return tf.reshape(x, [-1])[:batch]\n\n return GetBatch(acc.xent), GetBatch(acc.amax)\n\n @classmethod #added fix to include meta flops for conv2d\n def FPropMeta(cls, p, inputs,*args):\n dim1, dim2 = inputs[:2]\n logits = tshape.Shape([dim1, p.num_classes])\n return py_utils.NestedMap(flops=100, out_shapes=(logits,))\n\n def _FProp2D(self,\n theta,\n inputs,\n class_weights,\n class_ids=None,\n class_probabilities=None):\n \"\"\"Computes xent loss and log-prob logit.\"\"\"\n p = self.params\n inputs = self._GetInputs(inputs)\n logits = self.Logits(theta, inputs)\n if class_probabilities is not None:\n per_example_xent, per_example_argmax = self.XentLossFromLogits(\n theta, logits, class_weights, class_ids, class_probabilities)\n elif p.chunk_size:\n class_ids = py_utils.HasShape(class_ids, [-1, 1])\n per_example_xent, per_example_argmax = self._XentLossByChunk(\n theta, inputs, class_ids)\n elif p.num_sampled == 0 or self.do_eval:\n per_example_xent, per_example_argmax = self.XentLossFromLogits(\n theta, logits, class_weights, class_ids, class_probabilities)\n else:\n # Use sampled soft-max in training mode with p.num_sampled set.\n assert p.num_sampled > 0\n assert p.use_bias\n tf.logging.vlog(\n 0, 'Using sampled_softmax_loss(..., num_sampled=%d, '\n 'num_classes=%d) in SimpleFullSoftmax::_FProp2D', p.num_sampled,\n p.num_classes)\n per_example_xent = tf.nn.sampled_softmax_loss(\n weights=[theta['weight_%d' % i] for i in range(p.num_shards)],\n biases=tf.concat([theta['bias_%d' % i] for i in range(p.num_shards)],\n axis=0),\n labels=tf.reshape(class_ids, [-1, 1]),\n inputs=self._GetInputs(inputs),\n num_sampled=p.num_sampled,\n num_classes=p.num_classes,\n seed=p.random_seed)\n # Avoid computing logits; per_example_argmax is going 
to be always right.\n per_example_argmax = tf.identity(class_ids)\n\n label_weights = tf.reshape(\n tf.cast(class_weights, py_utils.FPropDtype(p)), [-1])\n total_xent = tf.reduce_sum(per_example_xent * label_weights)\n total_weights = tf.reduce_sum(label_weights)\n return py_utils.NestedMap(\n logits=logits,\n log_probs=tf.nn.log_softmax(logits),\n per_example_argmax=per_example_argmax,\n per_example_xent=per_example_xent,\n per_example_weight=label_weights,\n total_xent=total_xent,\n total_weight=total_weights,\n avg_xent=total_xent / total_weights)\n\n def FProp(self,\n theta,\n inputs,\n paddings=None,  # Expected to be None; accepted for API compatibility.\n class_weights=None,\n class_ids=None,\n class_probabilities=None):\n \"\"\"Computes logit, cross entropy etc.\n\n This function can work with either class_ids or probability distributions\n over classes. Exactly one of class_ids or class_probabilities must be\n provided.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: a list of a single tensor, or a single tensor with the shape [...,\n input_dim].\n class_weights: a tensor with shape [...] containing the weights for each\n target word.\n class_ids: a tensor with shape [..., 1] of int32 dtype containing the\n target class labels.\n class_probabilities: a tensor with shape [..., num_classes] of float\n values indicating class-membership probabilities.\n\n Returns:\n A `.NestedMap` containing the following fields\n\n - logits: with shape [..., num_classes]. Unnormalized softmax's logits.\n - per_example_argmax: with shape [...]. argmax of i-th example.\n - per_example_xent: with shape [...]. Cross entropy between i-th example's\n prediction and its label.\n - per_example_weight: with shape [...]. class_weights casted to\n this layer's dtype.\n - total_xent: A scalar. The sum of per_example_weight * per_example_xent.\n - total_weight: A scalar. The sum of per_example_weight.\n - avg_xent: A scalar. total_loss / total_weight.\n \"\"\"\n p = self.params\n if class_weights is None and class_ids is None:\n # User-added fallback when no labels are passed in.\n class_weights = self.class_weights[:16]\n class_ids = self.class_ids[:16]\n # Consolidate list/single value into a list.\n if not isinstance(inputs, list):\n inputs = [inputs]\n\n # If inputs are matrices already, delegate to _FProp2D.\n if inputs[0].shape.ndims == 2:\n return self._FProp2D(theta, inputs, class_weights, class_ids,\n class_probabilities)\n\n # Remembers the original shape[1:-1].\n shape_mid = tf.shape(inputs[0])[1:-1]\n\n # Reshape inputs to matrices, labels to vectors, etc.\n inputs = [\n tf.reshape(x, py_utils.ToStaticShape([-1, p.input_dim])) for x in inputs\n ]\n class_weights = tf.reshape(class_weights, [-1])\n if class_ids is not None:\n class_ids = tf.reshape(class_ids, [-1, 1])\n if class_probabilities is not None:\n class_probabilities = tf.reshape(class_probabilities, [-1, p.num_classes])\n\n # Delegates to _FProp2D.\n xent_loss = self._FProp2D(theta, inputs, class_weights, class_ids,\n class_probabilities)\n\n # Reshapes xent_loss fields according to the inputs' shape.\n xent_loss.logits = tf.reshape(\n xent_loss.logits, tf.concat([[-1], shape_mid, [p.num_classes]], axis=0))\n\n per_example_shape = tf.concat([[-1], shape_mid], axis=0)\n xent_loss.per_example_argmax = tf.reshape(xent_loss.per_example_argmax,\n per_example_shape)\n\n xent_loss.per_example_xent = tf.reshape(xent_loss.per_example_xent,\n per_example_shape)\n\n xent_loss.per_example_weight = tf.reshape(xent_loss.per_example_weight,\n per_example_shape)\n return xent_loss\n\n def XentLossFromLogits(self,\n theta,\n logits,\n class_weights,\n class_ids=None,\n class_probabilities=None):\n \"\"\"Computes cross-entropy, argmax etc. from logits.\"\"\"\n p = self.params\n assert logits is not None\n if class_probabilities is not None:\n per_example_xent = tf.nn.softmax_cross_entropy_with_logits(\n labels=class_probabilities, logits=logits)\n per_example_argmax = py_utils.ArgMax(logits)\n elif p.num_sampled == 0 or self.do_eval:\n assert class_ids is not None\n tf.logging.vlog(\n 0, 'Using sparse_softmax_cross_entropy_with_logits() in '\n 'SimpleFullSoftmax::_FProp2D logits_shape=%r',\n py_utils.GetShape(logits))\n per_example_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=tf.reshape(class_ids, [-1]), logits=logits)\n per_example_argmax = py_utils.ArgMax(logits)\n else:\n raise ValueError(\n 'This set of arguments is not supported for XentLossFromLogits.')\n return per_example_xent, per_example_argmax\n\n\nclass SharedSoftmaxLayer(SimpleFullSoftmax):\n \"\"\"Shared softmax layer for decoder embedding/softmax matrix.\"\"\"\n\n @classmethod\n def Params(cls):\n \"\"\"Params for SharedSoftmaxLayer.\"\"\"\n p = super().Params()\n p.Define(\n 'scale_sqrt_depth', False, 'If set True, activations are scaled'\n ' with sqrt(input_dim) in EmbLookup.')\n p.Define(\n 'embedding_dim', 0, 'Set to be compatible with embedding layer, '\n 'and it is equivalent to input_dim.')\n p.Define(\n 'vocab_size', 0, 'Set to be compatible with embedding layer, and '\n 'it is equivalent to num_classes.')\n return p\n\n def EmbLookup(self, theta, ids):\n p = self.params\n ids = py_utils.with_dependencies([\n py_utils.assert_between(\n ids,\n 0,\n p.num_classes,\n summarize=100000,\n message='{}:class_id_validation'.format(p.cls))\n ], ids)\n\n wm = self._ConcatWeights(theta).wm\n if not self._transpose_weight_params:\n wm = tf.transpose(wm)\n embs_result = tf.gather(wm, ids)\n\n if p.scale_sqrt_depth:\n embs_result 
*= p.input_dim**0.5\n\n return embs_result\n\n\nclass SingleShardFullSoftmax(SoftmaxLayer):\n \"\"\"Full softmax layer.\"\"\"\n\n def __init__(self, params):\n \"\"\"Constructs a SingleShardFullSoftmax layer.\"\"\"\n super().__init__(params)\n p = self.params\n assert p.name\n linear_p = builder_layers.LinearLayer.Params().Set(\n name='linear', input_dims=p.input_dim, output_dims=p.num_classes)\n self.CreateChild('linear', linear_p)\n bias_p = builder_layers.BiasLayer.Params().Set(\n name='bias', dims=p.num_classes)\n self.CreateChild('bias', bias_p)\n\n def Logits(self, theta, inputs):\n \"\"\"Returns the logits computed before the softmax.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: A single tensor with shape [..., input_dim].\n\n Returns:\n logits [..., num_classes]\n \"\"\"\n p = self.params\n if isinstance(inputs, (list, tuple)):\n assert len(inputs) == 1\n inputs = inputs[0]\n after_proj = self.linear.FProp(theta.linear, inputs)\n logits = self.bias.FProp(theta.bias, after_proj)\n # Clip logits by range.\n # Note that this is generally not used in conjunction with quantization and\n # shouldn't be needed at inference time as the quantized matmul above will\n # take care of clipping naturally based on the data type and qparams.\n abs_max = p.logits_abs_max\n if abs_max is not None and not p.is_inference:\n abs_min = -abs_max # pylint: disable=invalid-unary-operand-type\n logits = py_utils.clip_by_value(logits, abs_min, abs_max)\n return logits\n\n def XentLossFromLogits(self,\n theta,\n logits,\n class_ids=None,\n class_probabilities=None):\n \"\"\"Computes cross-entropy, argmax etc. from logits.\"\"\"\n assert logits is not None\n if class_probabilities is not None:\n per_example_xent = tf.nn.softmax_cross_entropy_with_logits(\n labels=class_probabilities, logits=logits)\n per_example_argmax = tf.stop_gradient(py_utils.ArgMax(logits))\n else:\n assert class_ids is not None\n fpdtype = logits.dtype\n if fpdtype == tf.bfloat16:\n # This is needed in order to workaround the limitation that\n # tf.nn.sparse_softmax_cross_entropy_with_logits is not implemented for\n # bf16 on cpu.\n logits = tf.cast(logits, tf.float32)\n per_example_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=class_ids, logits=logits)\n if fpdtype == tf.bfloat16:\n per_example_xent = tf.cast(per_example_xent, fpdtype)\n\n per_example_argmax = tf.stop_gradient(py_utils.ArgMax(logits))\n return per_example_xent, per_example_argmax\n\n def XentLossByChunk(self, theta, activation, class_ids, class_probabilities):\n \"\"\"Computes per-example xent loss.\"\"\"\n p = self.params\n\n act_orig_shape = tf.shape(activation)\n batch_size = act_orig_shape[0]\n chunk_size = p.chunk_size\n num_chunks = batch_size // chunk_size\n\n num_chunks = py_utils.with_dependencies([\n py_utils.assert_equal(\n 0,\n tf.math.floormod(batch_size, chunk_size),\n summarize=2,\n message='assert_equal')\n ], num_chunks)\n\n def ReshapeX(x):\n if x is None:\n return None\n x_shape = tf.shape(x)\n new_shape = tf.concat([[num_chunks, chunk_size], x_shape[1:]], 0)\n return tf.reshape(x, new_shape)\n\n activation = ReshapeX(activation)\n class_ids = ReshapeX(class_ids)\n class_probabilities = ReshapeX(class_probabilities)\n\n # For each chunk, we compute logits of activation[i, :, :],\n # and its xent loss with class_ids[i, :].\n def ChunkFn(theta, state0, inputs):\n del state0\n activation = inputs.activation\n class_ids = inputs.get('class_ids', None)\n 
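# (Added note, not in original source) Exactly one of class_ids /\n # class_probabilities is expected in inputs, mirroring XentLossFromLogits;\n # .get() leaves the absent one as None for each chunk.\n 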
class_probabilities = inputs.get('class_probabilities', None)\n logits = self.Logits(theta, activation)\n per_example_xent, per_example_argmax = self.XentLossFromLogits(\n theta, logits, class_ids, class_probabilities)\n return py_utils.NestedMap(\n xent=per_example_xent, amax=per_example_argmax), py_utils.NestedMap()\n\n inputs_nmap = py_utils.NestedMap(activation=activation)\n if class_ids is not None:\n inputs_nmap.class_ids = class_ids\n if class_probabilities is not None:\n inputs_nmap.class_probabilities = class_probabilities\n\n xent_state0 = tf.zeros(tf.shape(activation)[1:-1], dtype=p.dtype)\n argmax_out_dtype = tf.int32 if py_utils.use_tpu() else tf.int64\n amax_state0 = tf.zeros(tf.shape(activation)[1:-1], dtype=argmax_out_dtype)\n\n acc, _ = recurrent.Recurrent(\n theta=theta,\n state0=py_utils.NestedMap(xent=xent_state0, amax=amax_state0),\n inputs=inputs_nmap,\n cell_fn=ChunkFn)\n\n # acc.xent has the shape [dim0, dim1]. acc.xent[i, :] are\n # per-example xent loss for examples in the i-th chunk. We\n # reshape acc.xent to a vector and slice the first 'batch' values.\n def GetBatch(x):\n return tf.reshape(x, act_orig_shape[:-1])\n\n return GetBatch(acc.xent), GetBatch(acc.amax)\n\n def FProp(self,\n theta,\n inputs,\n class_weights,\n class_ids=None,\n class_probabilities=None):\n \"\"\"Computes logits, cross entropy etc.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: a single tensor with shape [..., input_dim].\n class_weights: a tensor with shape [..., 1] containing the weights for\n each target word.\n class_ids: a tensor with shape [..., 1] of int32 dtype containing the\n target class labels.\n class_probabilities: a tensor with shape [..., num_classes] of float\n values indicating class-membership probabilities.\n\n Returns:\n A `.NestedMap` containing the following fields\n\n - logits: with shape [..., num_classes]. Unnormalized softmax's logits.\n - per_example_argmax: with shape [...]. argmax of i-th example.\n - per_example_xent: with shape [...]. Cross entropy between i-th example's\n prediction and its label.\n - per_example_weight: with shape [...]. class_weights casted to\n this layer's dtype.\n - total_xent: A scalar. The sum of per_example_weight * per_example_xent.\n - total_weight: A scalar. The sum of per_example_weight.\n - avg_xent: A scalar. 
total_loss / total_weight.\n \"\"\"\n p = self.params\n if isinstance(inputs, (list, tuple)):\n assert len(inputs) == 1\n inputs = inputs[0]\n\n inputs_shape = tf.shape(inputs)\n ids_shape = tf.concat([inputs_shape[:-1], [1]], 0)\n probs_shape = tf.concat([inputs_shape[:-1], [p.num_classes]], 0)\n\n class_weights = py_utils.HasShape(class_weights, ids_shape)\n class_weights = tf.squeeze(class_weights, -1)\n if class_ids is not None:\n class_ids = py_utils.HasShape(class_ids, ids_shape)\n class_ids = tf.squeeze(class_ids, -1)\n if class_probabilities is not None:\n class_probabilities = py_utils.HasShape(class_probabilities, probs_shape)\n\n if (not self.do_eval) and (p.chunk_size > 0):\n # Chunking.\n logits = None\n log_probs = None\n per_example_xent, per_example_argmax = self.XentLossByChunk(\n theta, inputs, class_ids, class_probabilities)\n else:\n logits = self.Logits(theta, inputs)\n log_probs = tf.nn.log_softmax(logits)\n per_example_xent, per_example_argmax = self.XentLossFromLogits(\n theta, logits, class_ids, class_probabilities)\n\n label_weights = tf.cast(class_weights, py_utils.FPropDtype(p))\n total_xent = tf.reduce_sum(per_example_xent * label_weights)\n total_weights = tf.reduce_sum(label_weights)\n output_nmap = py_utils.NestedMap(\n per_example_argmax=per_example_argmax,\n per_example_xent=per_example_xent,\n per_example_weight=label_weights,\n total_xent=total_xent,\n total_weight=total_weights,\n avg_xent=total_xent / (total_weights + 1e-6))\n if logits is not None:\n output_nmap.logits = logits\n output_nmap.log_probs = log_probs\n return output_nmap\n\n\nclass SingleShardSharedEmbeddingSoftmax(SingleShardFullSoftmax):\n \"\"\"A shared softmax/embedding layer.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('vocab_size', 0, 'Num tokens in vocab.')\n p.Define('embedding_dim', 0, 'Depth of the output.')\n p.Define(\n 'scale_sqrt_depth', False, 'If set True, activations are scaled'\n ' with sqrt(embedding_dim) in EmbLookup.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.vocab_size == p.num_classes\n assert p.embedding_dim == p.input_dim\n\n def EmbLookupDefaultTheta(self, ids):\n return self.EmbLookup(self.theta, ids)\n\n def EmbLookup(self, theta, ids):\n \"\"\"Looks up embedding vectors for ids.\n\n Args:\n theta: Named tuple with the weight matrix for the embedding.\n ids: A rank-N int32 tensor.\n\n Returns:\n A rank-(N+1) params.dtype tensor.\n embs[indices, :] is the embedding vector for ids[indices].\n \"\"\"\n p = self.params\n ids = tf.convert_to_tensor(ids)\n ids = py_utils.with_dependencies([\n py_utils.assert_between(\n ids, 0, p.vocab_size, name='vocab_id_validation')\n ], ids)\n # TODO(yonghui): Get rid of this extra copy (tf.transpose).\n emb_vars = tf.transpose(theta.linear.w)\n embs = tf.nn.embedding_lookup(emb_vars, tf.reshape(ids, [-1]))\n if p.scale_sqrt_depth:\n embs *= p.embedding_dim**0.5\n if p.vn.global_vn or p.vn.per_step_vn:\n embs = py_utils.AddGlobalVN(p, embs)\n out_shape = tf.concat([tf.shape(ids), [p.embedding_dim]], 0)\n return tf.reshape(embs, out_shape)\n\n\nclass ConvSoftmax(quant_utils.QuantizableLayer):\n \"\"\"A softmax implementation based on 1x1 convolution.\n\n On TPU this is much more memory efficient than MatMul after reshaping logits\n to a matrix.\n \"\"\"\n\n @classmethod\n def Params(cls):\n \"\"\"Params for SoftmaxLayer.\"\"\"\n p = super().Params()\n p.Define('input_dim', 0, 'Dimension of the input.')\n p.Define('hidden_dim', 0, 'Dimension of 
the hidden layer.')\n p.Define('num_classes', 0, 'Total number of target classes.')\n return p\n\n def _CreateLayerVariables(self):\n \"\"\"Creates the variables of the ConvSoftmax layer.\"\"\"\n super()._CreateLayerVariables()\n p = self.params\n if p.hidden_dim:\n w_proj_pc = py_utils.WeightParams(\n shape=(1, p.input_dim, p.hidden_dim),\n init=p.params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n self.CreateVariable('w_proj', w_proj_pc)\n w_pc = py_utils.WeightParams(\n shape=(1, p.hidden_dim or p.input_dim, p.num_classes),\n init=p.params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n self.CreateVariable('w', w_pc)\n self.CreateVariable(\n 'b',\n py_utils.WeightParams(\n shape=[p.num_classes],\n init=py_utils.WeightInit.Constant(0.0),\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars']))\n\n def Logits(self, theta, inputs):\n p = self.params\n with tf.name_scope(p.name):\n if inputs.shape.ndims == 2:\n # [batch, time, depth]\n x = inputs[:, tf.newaxis, :]\n else:\n x = py_utils.HasShape(inputs, [-1, -1, -1])\n if p.hidden_dim:\n x = tf.nn.conv1d(x, theta.w_proj, 1, 'VALID')\n logits = tf.nn.bias_add(tf.nn.conv1d(x, theta.w, 1, 'VALID'), theta.b)\n if inputs.shape.ndims == 2:\n return logits[:, 0, :]\n else:\n return logits\n\n\nclass DropoutLayer(base_layer.BaseLayer):\n \"\"\"Apply dropout during training.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('keep_prob', 1.0, 'Keep probability.')\n # noise_shape is unknown when building layer params.\n p.Define(\n 'noise_shape', None, 'A 1-D `Tensor` of type `int32`, representing'\n ' the shape for randomly generated keep/drop flags.')\n p.Define(\n 'noise_shape_broadcast_dims', None,\n 'A list of dimensions along which the noise shape is broadcast. For '\n 'example, noise_shape = [n, h, w, 1] when '\n 'noise_shape_broadcast_dims=[-1].')\n # We typically want to replace dropout by expectation during eval.\n # However, in certain cases E(f(x)) != f(E(x)), and replacing dropout by its\n # expectation during eval leads to worse quality.\n p.Define('dropout_at_eval', False,\n 'Whether or not to also perform dropout at eval time.')\n return p\n\n def _Dropout(self, theta, inputs, noise_shape):\n return tf.nn.dropout(\n inputs,\n rate=1 - self.params.keep_prob,\n noise_shape=noise_shape,\n seed=self.params.random_seed)\n\n @classmethod\n def NumOutputNodes(cls, p):\n # The layer does element-wise processing thus is input-shape agnostic.\n return\n\n def FProp(self, theta, inputs):\n \"\"\"Apply dropout to inputs.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: The inputs tensor.\n\n Returns:\n inputs with dropout applied at training time.\n \"\"\"\n p = self.params\n if not self.do_eval or p.dropout_at_eval:\n if isinstance(p.keep_prob, numbers.Real) and p.keep_prob == 1.0:\n return inputs\n if p.noise_shape_broadcast_dims:\n noise_shape = p.noise_shape or py_utils.GetShape(inputs)\n for dim in p.noise_shape_broadcast_dims:\n if dim >= len(noise_shape):\n raise ValueError('Invalid broadcasted dim {}'.format(dim))\n noise_shape[dim] = 1\n else:\n noise_shape = p.noise_shape\n ret = self._Dropout(theta, inputs, noise_shape)\n ret.set_shape(inputs.get_shape())\n return ret\n else:\n return inputs\n\n @classmethod\n def FPropMeta(cls, p, inputs, *args):\n py_utils.CheckShapes((inputs,))\n flops_per_element = 10 # Approximately 10 flops per element.\n return py_utils.NestedMap(\n flops=inputs.num_elements() * flops_per_element, out_shapes=(inputs,))\n\n\nclass DeterministicDropoutLayer(DropoutLayer):\n \"\"\"Apply deterministic (seeded) dropout during training.\"\"\"\n\n def _Dropout(self, theta, inputs, noise_shape):\n return py_utils.DeterministicDropout(\n inputs,\n keep_prob=self.params.keep_prob,\n seeds=py_utils.GenerateStepSeedPair(self.params, theta.global_step),\n noise_shape=noise_shape)\n\n\nclass LayerNorm(base_layer.BaseLayer):\n \"\"\"Layer normalization.\n\n Implements layer normalization:\n https://arxiv.org/abs/1607.06450\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('input_dim', 0, 'Depth of the input to the network.')\n p.Define('epsilon', 1e-6, 'Tiny value to guard rsqrt.')\n p.Define('use_fused_layernorm', False, 'Whether to use fused layernorm.')\n p.Define(\n 'direct_scale', False, 'Whether to apply scale directly '\n 'without a +1.0. Var is initialized to 1.0 instead. 
This makes '\n 'the layer weight-compatible with the implementation in '\n 'contrib.layers.')\n\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n assert p.input_dim > 0, p.input_dim\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n p = self.params\n pc = py_utils.WeightParams(\n shape=[p.input_dim],\n init=py_utils.WeightInit.Constant(0.0),\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'] +\n [py_utils.SKIP_LP_REGULARIZATION])\n self.CreateVariable('bias', pc)\n\n if p.direct_scale:\n scale_pc = py_utils.WeightParams(\n shape=[p.input_dim],\n init=py_utils.WeightInit.Constant(1.0),\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'] +\n [py_utils.SKIP_LP_REGULARIZATION])\n else:\n scale_pc = pc\n self.CreateVariable('scale', scale_pc)\n\n def _GetScaleAndBias(self, theta):\n return theta.scale, theta.bias\n\n def FProp(self, theta, inputs):\n \"\"\"Applies normalization over the last dimension (layer).\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: A tensor of shape [..., hidden_dim].\n\n Returns:\n tensor of the same shape with inputs\n \"\"\"\n if py_utils.testonly_skip_norm_layers():\n return inputs\n\n p = self.params\n inputs = py_utils.with_dependencies(\n [py_utils.assert_equal(tf.shape(inputs)[-1], p.input_dim)], inputs)\n\n cur_scale, cur_bias = self._GetScaleAndBias(theta)\n\n if p.direct_scale:\n scale = cur_scale\n else:\n scale = 1.0 + cur_scale\n\n if p.use_fused_layernorm:\n counts, means_ss, variance_ss, _, = tf.nn.sufficient_statistics(\n inputs, axes=[-1], keepdims=True)\n mean, variance = tf.nn.normalize_moments(counts, means_ss, variance_ss,\n None)\n # Adding a cast here. 
Sometimes, inputs/mean/variance/p.epsilon are in\n # float32 while scale and cur_bias are in bf16.\n inputs_norm = tf.cast(\n (inputs - mean) * tf.math.rsqrt(variance + p.epsilon),\n dtype=scale.dtype)\n return inputs_norm * scale + cur_bias\n\n def Normalize(xs):\n \"\"\"Normalize `xs.x` w/ `xs.scale` and `xs.bias` gain/shift.\"\"\"\n x_shape = py_utils.GetShape(xs.x)\n inner_dim = x_shape[-1]\n x_reshaped = tf.reshape(xs.x, [-1, inner_dim])\n mean = tf.reduce_mean(x_reshaped, axis=[1], keepdims=True)\n variance = tf.reduce_mean(\n tf.square(x_reshaped - mean), axis=[1], keepdims=True)\n if variance.dtype == tf.bfloat16:\n # tf.rsqrt is not implemented for bfloat16, hence we always cast into\n # tf.float32.\n x_norm_den_inv = tf.cast(\n tf.math.rsqrt(tf.cast(variance + p.epsilon, tf.float32)),\n x_reshaped.dtype)\n else:\n x_norm_den_inv = tf.cast(\n tf.math.rsqrt(variance + p.epsilon), x_reshaped.dtype)\n x_norm = (x_reshaped - mean) * x_norm_den_inv\n x_norm = tf.reshape(x_norm, x_shape)\n return x_norm * xs.scale + xs.bias\n\n return py_utils.CallDefun(\n Normalize, py_utils.NestedMap(x=inputs, scale=scale, bias=cur_bias))\n\n @classmethod\n def NumOutputNodes(cls, p):\n return p.input_dim\n\n @classmethod\n def FPropMeta(cls, p, inputs):\n py_utils.CheckShapes((inputs,))\n return py_utils.NestedMap(\n flops=inputs.num_elements() * 10, out_shapes=(inputs,))\n\n\nclass CategoricalLayerNorm(LayerNorm):\n \"\"\"Categorical layer normalization.\n\n Allow dynamic switch of normalization params based on given class_index.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_classes', 1,\n 'Number of privatized copies of layer norm params.')\n return p\n\n def _BiasVarName(self, i):\n return 'bias_' + str(i)\n\n def _ScaleVarName(self, i):\n return 'scale_' + str(i)\n\n def _CreateLayerVariables(self):\n # Skip LayerNorm's _CreateLayerVariables() as bias and scale variables will\n # be created in this function.\n super(LayerNorm, self)._CreateLayerVariables() # pylint: disable=bad-super-call\n p = self.params\n pc = py_utils.WeightParams(\n shape=[self.params.input_dim],\n init=py_utils.WeightInit.Constant(0.0),\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'] +\n [py_utils.SKIP_LP_REGULARIZATION])\n for i in range(p.num_classes):\n self.CreateVariable(self._BiasVarName(i), pc)\n self.CreateVariable(self._ScaleVarName(i), pc)\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert isinstance(p.num_classes, int)\n assert p.num_classes > 0\n self.AddExtraTheta('class_index', tf.constant(0, dtype=tf.int32))\n\n def _GetScaleAndBias(self, theta):\n p = self.params\n with tf.control_dependencies(\n [py_utils.assert_between(theta.class_index, 0, p.num_classes)]):\n biases = [theta[self._BiasVarName(i)] for i in range(p.num_classes)]\n cur_bias = tf.gather(biases, theta.class_index)\n scales = [theta[self._ScaleVarName(i)] for i in range(p.num_classes)]\n cur_scale = tf.gather(scales, theta.class_index)\n return cur_scale, cur_bias\n\n\nclass ConvSetLayer(quant_utils.QuantizableLayer):\n \"\"\"Set of Convolutions with different filter sizes in a single layer.\n\n Applies a set of convolutions with different filter shapes to the inputs and\n returns the concatenated outputs.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('cnn_tpl',\n ConvLayer.Params().Set(filter_stride=(1, 1)),\n 'Conv layer template for the set of conv layers.')\n p.Define(\n 'filter_shapes', [(0, 0, 0, 0)],\n 'Must be a 
list of sequences of 4. Elements are in order of height'\n ' (time), width (frequency), in_channel, out_channel')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n\n filter_set = set()\n input_shape = None\n # Asserting kernel sizes are different and input sizes are the same.\n for filter_shape in p.filter_shapes:\n key = '%d_%d' % (filter_shape[0], filter_shape[1])\n assert key not in filter_set\n filter_set.add(key)\n if input_shape is None:\n input_shape = filter_shape[2]\n assert input_shape == filter_shape[2]\n\n params_conv_set = []\n for filter_shape in p.filter_shapes:\n conv_p = p.cnn_tpl.Copy()\n conv_p.name = '%d_%d' % (filter_shape[0], filter_shape[1])\n # Important: combined quantization will be done pre-concat versus\n # by each layer on its output. Otherwise, inherit quantization params\n # from this layer.\n if p.qdomain.default is not None:\n conv_p.qdomain.default = p.qdomain.default.Copy()\n conv_p.disable_activation_quantization = True\n conv_p.filter_shape = filter_shape\n params_conv_set.append(conv_p)\n self.CreateChildren('conv_set', params_conv_set)\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n # The same QTensor is used for all inputs to the concat.\n self.TrackQTensor('activation')\n\n def FProp(self, theta, inputs, paddings):\n \"\"\"Apply all convolution sets to inputs and concatenate outputs.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: The inputs tensor. It is expected to be of shape [batch, time,\n frequency, channel]. The time dimension corresponds to the height\n dimension as in images and the frequency dimension corresponds to the\n width dimension as in images.\n paddings: The paddings tensor. It is expected to be of shape [batch,\n time].\n\n Returns:\n A tuple (out, output_paddings).\n\n - out: output tensor. Expected to be of shape [batch, time_mod,\n frequency_mod, out_channel_1 + out_channel_2 ...] where time_mod and\n frequency_mod depend on the conv layer strides and out_channel_i is\n the output channel size of the i-th conv layer in the set.\n - output_paddings: Modified paddings generated within `ConvLayer.FProp`.\n Expected to be of the shape [batch, time_mod].\n \"\"\"\n p = self.params\n inputs = py_utils.with_dependencies([\n py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]),\n py_utils.assert_shape_match(\n tf.shape(inputs),\n tf.concat([tf.shape(paddings), [-1, p.filter_shapes[0][2]]], 0))\n ], inputs)\n\n conv_outputs = []\n output_paddings = None\n # output_padding should be same for all filters for the same stride.\n for i, conv_i in enumerate(self.conv_set):\n conv_i_output, conv_i_padding = conv_i.FProp(theta.conv_set[i], inputs,\n paddings)\n if output_paddings is None:\n output_paddings = conv_i_padding\n conv_outputs.append(conv_i_output)\n\n # Track for quantization.\n conv_outputs = [self.QTensor('activation', t) for t in conv_outputs]\n\n out = tf.concat(conv_outputs, -1)\n return out, output_paddings\n\n\nclass LocalizedLabelSmoother(base_layer.BaseLayer):\n \"\"\"Smooths labels given as class ids.\n\n Implements the smoothing from https://arxiv.org/abs/1612.02695. Instead of\n 1-hot class ids the model is trained to predict a distribution over classes\n that includes the correct class label and with a small probability the labels\n of tokens that appear nearby in time in the ground truth. 
This typically acts\n as a strong regularizer.\n\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_classes', 0, 'Number of classes')\n p.Define(\n 'offsets', [], 'Offset (over time) for smoothing. At time T the '\n 'smoothed target is class[T] + sum_i weights[i]*class[T+offset[i]]')\n p.Define('weights', [], 'Weight of the smoothing at corresponding offset')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.num_classes > 0\n assert len(p.offsets) == len(p.weights)\n assert p.name\n\n def FProp(self, theta, target_paddings, target_labels, target_ids):\n \"\"\"Convert class_ids to 1hot and smooth by neighborhood.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n target_paddings: float32 matrix [bs, seq_len]\n target_labels: int32 matrix [bs, seq_len]. This stores the target label\n output at each decoder step as generated by the speech input generator\n input_batch.tgt.labels\n target_ids: int32 matrix [bs, seq_len]. This stores the target_id that is\n fed to the decoder, as generated by the speech input generator\n input_batch.tgt.ids\n\n Returns:\n A tensor [bs, seq_len, num_classes] denoting a smoothed distribution over\n num_classes.\n \"\"\"\n del target_ids # Unused.\n p = self.params\n class_probabilities = tf.one_hot(\n target_labels, p.num_classes, dtype=py_utils.FPropDtype(p))\n\n # Start list keeping the scaled class-probabilities at different offsets.\n output_distributions = [class_probabilities]\n seq_len = tf.shape(class_probabilities)[1]\n # If offsets < 0 we force a future output_act to be like a past token.\n # If offsets > 0 we force a past output_act to be like a future token.\n min_offset = np.min(p.offsets + [0])\n max_offset = np.max(p.offsets + [0])\n class_probabilities = tf.pad(class_probabilities,\n [[0, 0], [-min_offset, max_offset], [0, 0]])\n # Shift the weights to the left by one location - we don't make the\n # EOS more probable.\n class_weights = tf.pad(1.0 - target_paddings[:, 1:],\n [[0, 0], [-min_offset, max_offset + 1]])\n class_weights = tf.expand_dims(class_weights, 2)\n\n for offset, weight in zip(p.offsets, p.weights):\n offset_in_padded = offset - min_offset\n output_distributions.append(\n class_probabilities[:, offset_in_padded:offset_in_padded + seq_len, :]\n * class_weights[:, offset_in_padded:offset_in_padded + seq_len, :] *\n weight)\n output_distributions = tf.add_n(output_distributions)\n output_distributions /= tf.reduce_sum(\n output_distributions, axis=-1, keepdims=True)\n return output_distributions\n\n\nclass UniformLabelSmoother(base_layer.BaseLayer):\n \"\"\"Smooths labels given as class ids and confidence.\n\n Implements the smoothing from https://arxiv.org/abs/1512.00567. 
Correct class\n label confidence is dropped by eps and all the other classes are increased\n by eps/num_classes.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_classes', 0, 'Number of classes')\n p.Define('uncertainty', 0.1, 'Uncertainty of correct label, eps.')\n p.Define(\n 'uncertainty_larger', 0.1,\n 'Apply a larger uncertainty to specific tokens, as specified '\n 'by token_from_target_ids.')\n p.Define('token_id_uncertainty_larger', None, 'Id of token from target_ids '\n 'to apply uncertainty_larger to.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.num_classes > 0\n assert 0.0 <= p.uncertainty < 1.0\n assert p.token_id_uncertainty_larger is None or (\n p.token_id_uncertainty_larger >= 0)\n assert p.name\n\n def FProp(self, theta, target_paddings, target_labels, target_ids):\n \"\"\"Convert target_labels to 1hot and smooth uniformly.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n target_paddings: float32 matrix [bs, seq_len]\n target_labels: int32 matrix [bs, seq_len]. This stores the target label\n output at each decoder step as generated by the speech input generator\n input_batch.tgt.labels\n target_ids: int32 matrix [bs, seq_len]. This stores the target_id that is\n fed to the decoder, as generated by the speech input generator\n input_batch.tgt.ids\n\n Returns:\n A tensor of float32 [bs, seq_len, num_classes] denoting a smoothed\n distribution over num_classes.\n \"\"\"\n del target_paddings # Unused by FProp.\n p = self.params\n\n low_confidence = p.uncertainty / tf.cast(p.num_classes - 1, tf.float32)\n high_confidence = (1.0 - p.uncertainty)\n\n smooth_targets = tf.one_hot(\n tf.cast(target_labels, tf.int32),\n depth=p.num_classes,\n on_value=high_confidence,\n off_value=low_confidence)\n if p.token_id_uncertainty_larger is not None:\n assert target_ids is not None\n low_confidence_larger = p.uncertainty_larger / tf.cast(\n p.num_classes - 1, tf.float32)\n high_confidence_larger = (1.0 - p.uncertainty_larger)\n smooth_targets_larger = tf.one_hot(\n tf.cast(target_labels, tf.int32),\n depth=p.num_classes,\n on_value=high_confidence_larger,\n off_value=low_confidence_larger)\n should_smooth_larger = tf.tile(\n tf.expand_dims(\n tf.equal(target_ids, p.token_id_uncertainty_larger), -1),\n multiples=[1, 1, p.num_classes])\n smooth_targets = tf.where(should_smooth_larger, smooth_targets_larger,\n smooth_targets)\n return smooth_targets\n\n\nclass HighwaySkipLayer(base_layer.BaseLayer):\n \"\"\"A highway skip layer.\n\n This class represents a highway skip layer, which takes multiple\n inputs (from different layers of the network) and gates them.\n This returns C(x)x + T(x)h, initially biasing C to be open.\n For some discussion about initialization please see:\n Section 2.2 in [Srivastava, 2015]: https://arxiv.org/pdf/1505.00387v2.pdf\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('input_dim', 0, 'Dimension of the input to the network.')\n p.Define(\n 'batch_norm', False,\n 'Whether or not to apply BN to the highway skip layer output. 
'\n 'Note this is only a single bool.')\n p.Define('carry_bias_init', 1.0, 'carry gates bias initialization')\n p.Define('couple_carry_transform_gates', False,\n 'Boolean on whether to couple the transform and carry gates.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n carry_gate_params = ProjectionLayer.Params().Set(\n batch_norm=p.batch_norm,\n has_bias=True,\n activation='SIGMOID',\n input_dim=p.input_dim,\n output_dim=p.input_dim,\n bias_init=p.carry_bias_init,\n name='%s_carry_gate' % p.name)\n self.CreateChild('carry_gate', carry_gate_params)\n\n if not p.couple_carry_transform_gates:\n transform_gate_params = ProjectionLayer.Params().Set(\n batch_norm=p.batch_norm,\n has_bias=True,\n activation='SIGMOID',\n input_dim=p.input_dim,\n output_dim=p.input_dim,\n bias_init=-p.carry_bias_init,\n name='%s_transform_gate' % p.name)\n self.CreateChild('transform_gate', transform_gate_params)\n\n def FProp(self, theta, x, transformed_x, paddings=None):\n \"\"\"Fprop for Highway Skip layer.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n x: feature at the lower layer.\n transformed_x: transformation of x at a higher layer.\n paddings: padding applied to the features.\n\n Returns:\n layer_out - activations after forward propagation.\n \"\"\"\n p = self.params\n assert self.carry_gate is not None\n carry = self.carry_gate.FProp(theta.carry_gate, x, paddings)\n if p.couple_carry_transform_gates:\n transform = 1 - carry\n else:\n assert self.transform_gate is not None\n transform = self.transform_gate.FProp(theta.transform_gate, x, paddings)\n layer_out = x * carry + transformed_x * transform\n return layer_out\n\n\nclass GatingLayer(base_layer.BaseLayer):\n \"\"\"A gating layer.\n\n This class represents a gating layer, which takes 2 inputs of the same shape\n and gates them.\n\n The output is: carry * x + (1 - carry) * y where, carry is given by\n sigmoid(x @ w_1 + y @ w_2 + bias).\n\n This is different from the HighwaySkipLayer above in that carry is also a\n function of y (named transformed_x in HighwaySkipLayer).\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('input_dim', 0, 'Dimension of the input to the network.')\n p.Define('has_bias', False, 'Whether carry has a bias term.')\n p.Define('carry_bias_init', 0.0, 'carry gates bias initialization')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n carry_gate_params = ProjectionLayer.Params().Set(\n batch_norm=False,\n has_bias=p.has_bias,\n activation='SIGMOID',\n input_dim=p.input_dim * 2,\n output_dim=p.input_dim,\n bias_init=p.carry_bias_init,\n name='carry')\n self.CreateChild('carry_gate', carry_gate_params)\n\n def FProp(self, theta, x, y, paddings=None):\n \"\"\"Fprop for the gating layer.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n x: An input feature, the last dimension must match p.input_dim.\n y: Another input feature. Must have the same shape as 'x'.\n paddings: padding applied to the features. When x and y have shape [...,\n input_dim], 'paddings', when specified, must have shaped [..., 1], where\n all but the last dimension match.\n\n Returns:\n layer_out - activations after forward propagation. 
Same shape as x and y.\n \"\"\"\n y = py_utils.with_dependencies(\n [py_utils.assert_shape_match(tf.shape(x), tf.shape(y))], y)\n carry = self.carry_gate.FProp(theta.carry_gate, tf.concat([x, y], axis=-1),\n paddings)\n layer_out = x * carry + y * (1 - carry)\n return layer_out\n\n\nclass GradNormTracker(base_layer.BaseLayer):\n \"\"\"A helper class to keep track of gradient norm stats.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('decay', 0.995,\n 'Decay in updating the moving avgs in grad norm stats')\n p.Define('grad_norm_lower_cap', 1e-2, 'The minimal gradient norm value.')\n p.Define(\n 'clip_threshold', 4.0,\n 'Distance threshold at which gradients are clipped to 0.0.'\n ' Distance is measured in the number of standard deviations a'\n ' given gradient norm is from the mean gradient norm. The'\n ' default value of 4.0 means we are throwing away roughly'\n ' 0.15% of steps.')\n p.Define(\n 'grad_norm_clip_cap_min', 0.0,\n 'We stop clipping if grad norm is already smaller than this'\n ' value.')\n p.Define(\n 'dry_run', False, 'If True, always return 1.0 in FProp() to signify '\n 'no grad clipping suggested, in which case the class only collects '\n 'stats and summaries.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n self._decay = params.decay\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n\n pc = py_utils.WeightParams(\n shape=[],\n init=py_utils.WeightInit.Constant(0.0),\n dtype=tf.float32,\n collections=[self.__class__.__name__ + '_vars'])\n self.CreateVariable('log_mean', pc, trainable=False)\n self.CreateVariable('log_mean_squared', pc, trainable=False)\n self.CreateVariable('total_weight', pc, trainable=False)\n self.CreateVariable('total_rejections', pc, trainable=False)\n\n def FProp(self, theta, grad_norm, has_nan=None):\n \"\"\"Update gradient norm moving avgs, and returns whether or not ...\n\n to clip gradients to 0.0. If the current batch has NaN grads, does not\n update the moving avgs and forces to clip the gradients to 0.0.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n grad_norm: A float scalar tensor.\n has_nan: A boolean scalar tensor to indicate if the current batch has nan.\n\n Returns:\n A scalar float tensor with value of either 1.0 or 0.0. 
The value of 0.0\n means the gradient norm is excessively large or contains NaN, and the step\n should be aborted completely.\n \"\"\"\n p = self.params\n with tf.name_scope(p.name):\n grad_norm = tf.maximum(grad_norm, p.grad_norm_lower_cap)\n\n # Exponentially decayed moving avg of log(grad_norm) mean.\n mean = theta.log_mean / tf.maximum(theta.total_weight, 1e-6)\n # Exponentially decayed moving avg of log(grad_norm) variance.\n var = ((theta.log_mean_squared / tf.maximum(theta.total_weight, 1e-6)) -\n mean * mean)\n std = tf.sqrt(tf.maximum(var, 1e-6))\n\n summary_utils.scalar('log_grad_norm_mean', mean)\n summary_utils.scalar('log_grad_norm_std', std)\n summary_utils.scalar('clip_ratio_threshold',\n tf.exp(std * p.clip_threshold))\n summary_utils.scalar('clip_threshold',\n tf.exp(mean + std * p.clip_threshold) - 1.0)\n summary_utils.scalar('total_rejections', theta.total_rejections)\n\n log_grad_norm = tf.math.log(grad_norm + 1.0)\n log_grad_norm_cap = tf.cast(mean + std * p.clip_threshold, tf.float32)\n log_grad_norm_cap_min = tf.math.log(p.grad_norm_clip_cap_min + 1.0)\n log_grad_norm_cap = tf.maximum(log_grad_norm_cap, log_grad_norm_cap_min)\n\n def UpdateExpMovingAvg(ref_var, val, ignore):\n if ignore is not None:\n delta = tf.where(ignore, tf.zeros([]),\n (1.0 - p.decay) * (val - ref_var))\n else:\n delta = (1.0 - p.decay) * (val - ref_var)\n return tf.assign_add(ref_var, delta)\n\n # We trigger when total_weight is at least half of max weight or the\n # current batch contains NaNs.\n trigger = tf.math.logical_and(log_grad_norm > log_grad_norm_cap,\n theta.total_weight > 0.75)\n if has_nan is not None:\n trigger = tf.math.logical_or(trigger, has_nan)\n\n log_grad_norm_capped = tf.minimum(log_grad_norm, log_grad_norm_cap)\n\n update_moving_avg = tf.group(\n UpdateExpMovingAvg(self.vars.log_mean, log_grad_norm_capped, has_nan),\n UpdateExpMovingAvg(self.vars.log_mean_squared,\n log_grad_norm_capped * log_grad_norm_capped,\n has_nan),\n UpdateExpMovingAvg(self.vars.total_weight, tf.constant(1.0), has_nan),\n tf.assign_add(self.vars.total_rejections,\n tf.cast(trigger, tf.float32)))\n\n return py_utils.with_dependencies([update_moving_avg],\n 1.0 if p.dry_run else 1.0 -\n tf.cast(trigger, tf.float32))\n\n\nclass WeightedSumLayer(base_layer.BaseLayer):\n \"\"\"Returns the weighted sum of a list of input tensors.\"\"\"\n\n @classmethod\n def Params(cls):\n \"\"\"Params for this MergerLayer class.\"\"\"\n p = super().Params()\n p.Define('num_sources', 0, 'Number of input sources to combine.')\n p.Define('weighted_merger_dropout_prob', 0.1,\n 'Applies dropout to the weights.')\n p.Define(\n 'weighted_merger_softmax', True, 'If set, applies a softmax '\n 'layer on top of the weights for normalization.')\n p.Define('global_weight_scale', 1.0, 'A global scale put on weights.')\n p.Define('minimal_prob', 0.0, 'The minimal weight for each component.')\n p.Define('add_weight_summaries', False, 'If set, creates summaries for the '\n 'sum weights.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if not p.name:\n raise ValueError('Layer must have a specified name!')\n\n assert p.num_sources > 0, ('Must specify num_sources > 0.')\n\n if p.weighted_merger_dropout_prob > 0.0:\n dropout_tpl = DropoutLayer.Params()\n dropout_tpl.keep_prob = (1.0 - p.weighted_merger_dropout_prob)\n self.CreateChild('weighted_merger_dropout', dropout_tpl)\n else:\n self.CreateChild('weighted_merger_dropout', IdentityLayer.Params())\n\n def _CreateLayerVariables(self):\n 
super()._CreateLayerVariables()\n p = self.params\n params_init = py_utils.WeightInit.Constant(0.0)\n # Weights to be learned.\n pw = py_utils.WeightParams(\n shape=[p.num_sources],\n init=params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n self.CreateVariable('sum_weight', pw)\n\n def FProp(self, theta, inputs):\n \"\"\"Combines the list of input tensors into a single tensor.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: A list of tensors of shape [time, batch, hidden_dim]\n\n Returns:\n A tensor of the same shape with input tensors.\n \"\"\"\n p = self.params\n n_sources = len(inputs)\n\n if n_sources == 1:\n return inputs[0]\n\n # Weighted sum of all sources, all dims must match.\n # For weighted_sum, assume input is a list of rank 3 tensors\n inputs = tf.stack(inputs)\n inputs = py_utils.HasRank(inputs, 4)\n\n # The constant factor is just meant to support the non-normalized scenario.\n # If softmax is applied, this factor will cancel out.\n w = theta.sum_weight * p.global_weight_scale + (1 / p.num_sources)\n w = self.weighted_merger_dropout.FProp(theta.weighted_merger_dropout, w)\n\n if p.weighted_merger_softmax:\n residual_weights = p.minimal_prob * p.num_sources\n assert residual_weights >= 0.0\n assert residual_weights < 1.0\n w = tf.nn.softmax(w, axis=0) * (1.0 - residual_weights) + p.minimal_prob\n\n if p.add_weight_summaries:\n for i in range(p.num_sources):\n summary_utils.scalar(p.name + 'weight_%d' % i, w[i])\n w = tf.reshape(w, [p.num_sources, 1, 1, 1])\n output = tf.reduce_sum(inputs * w, axis=0)\n\n return output\n\n\nclass GatedAverageLayer(base_layer.BaseLayer):\n \"\"\"Gated combination of n input vectors.\n\n Given n inputs, x_1 ... x_n. First learns a gate g in a single layer.\n Returns g_1 * x_1 + ... 
g_n * x_n.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_nodes', 0, 'Number of nodes in each input vector.')\n p.Define('num_inputs', 0, 'Number of input vectors to combine.')\n return p\n\n def __init__(self, params):\n \"\"\"Initializes GatedAverageLayer.\"\"\"\n super().__init__(params)\n p = self.params\n\n assert p.num_nodes > 0, 'Number of dimensions should be greater than 0.'\n assert p.num_inputs > 0, 'Number of inputs should be greater than 0.'\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n p = self.params\n in_size = p.num_inputs * p.num_nodes\n\n # Weight matrix for scalar gates\n gm_pc = py_utils.WeightParams(\n shape=[in_size, p.num_inputs],\n init=p.params_init,\n dtype=p.dtype,\n collections=self._VariableCollections())\n self.CreateVariable('gm', gm_pc)\n\n def FProp(self, theta, inputs):\n \"\"\"Gates, then merges a list of n input vectors.\n\n Args:\n theta: gm (gate matrix)\n inputs: List of inputs, each of shape [..., num_nodes]\n\n Returns:\n a gated output vector [..., num_nodes]\n \"\"\"\n p = self.params\n assert len(inputs) == p.num_inputs, 'Number of inputs should match params.'\n\n for i, inp in enumerate(inputs):\n inputs[i] = py_utils.with_dependencies([\n py_utils.assert_shape_match([tf.shape(inp)[-1]], [p.num_nodes]),\n py_utils.assert_shape_match(tf.shape(inp), tf.shape(inputs[0])),\n ], inp)\n\n input_shape = tf.shape(inputs[0])\n\n reshaped_inputs = [tf.reshape(inp, [-1, p.num_nodes]) for inp in inputs]\n concat_inputs = tf.concat(reshaped_inputs, axis=1)\n\n xmg = tf.nn.softmax(py_utils.Matmul(concat_inputs, theta.gm))\n xmg = tf.expand_dims(xmg, 2)\n inputs = tf.reshape(concat_inputs, [-1, p.num_inputs, p.num_nodes])\n gated_sum = tf.reduce_sum(xmg * inputs, axis=1)\n\n return tf.reshape(gated_sum, input_shape)\n\n\nclass LHUCLayer(base_layer.BaseLayer):\n \"\"\"`Learning Hidden Unit Contribution (LHUC)` layer.\n\n This paper proposes to use LHUC layer for NMT adaptation:\n http://aclweb.org/anthology/N18-2080\n\n During base model training, LHUC layer is fixed to 1.0 (no-op in\n multiplication). During adaptation, only LHUC layer is trained, and all other\n parameters in the model are frozen.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('input_dim', 0, 'Dimension of the input and output.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n assert p.input_dim > 0\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n p = self.params\n pc = py_utils.WeightParams(\n shape=[p.input_dim],\n init=py_utils.WeightInit.Constant(0.0),\n dtype=p.dtype,\n collections=self._VariableCollections())\n self.CreateVariable('w', pc)\n\n def FProp(self, theta, inp):\n \"\"\"Add learnt gate for adaptation.\"\"\"\n out = 2.0 * tf.sigmoid(theta.w) * inp\n return out\n\n\nclass ResidualAdapterLayer(base_layer.BaseLayer):\n \"\"\"Residual Adapter layer for NLP tasks.\n\n This paper proposes using residual adapters for fine-tuning new tasks on BERT.\n https://arxiv.org/pdf/1902.00751.pdf\n\n During adaptation, residual adapter layers can be added to a pre-trained\n model and trained, while all other parameters are frozen.\n In terms of operations, the layer is identical to a vanilla Transformer\n feedforward layer. 
Separate implementation is meant to distinguish function.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('input_dim', 0, 'Dimension of the input to the adapter.')\n p.Define('bottleneck_dim', 0, 'Dimension of the feedforward inner layer.')\n p.Define('ln_tpl', LayerNorm.Params(), 'Layer norm default params.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n\n bottleneck_params = FeedForwardNet.Params().Set(\n name='bottleneck',\n activation=['RELU', 'NONE'],\n input_dim=p.input_dim,\n hidden_layer_dims=[p.bottleneck_dim, p.input_dim])\n self.CreateChild('bottleneck', bottleneck_params)\n\n params = p.ln_tpl.Copy()\n params.name = 'adapter_ln'\n params.input_dim = p.input_dim\n self.CreateChild('layer_norm', params)\n\n def FProp(self, theta, x, paddings=None):\n \"\"\"Fprop for Residual Adapter.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n x: [..., input_dim].\n paddings: padding applied to the features.\n\n Returns:\n layer_out - [..., input_dim].\n \"\"\"\n normalized_x = self.layer_norm.FProp(theta.layer_norm, x)\n bottleneck_x = self.bottleneck.FProp(theta.bottleneck, normalized_x,\n paddings)\n return x + bottleneck_x\n\n\ndef Conv2DFlops(inputs, filter_shape, stride, padding):\n \"\"\"Returns number of float operations (mult/adds) for a Conv2D op.\n\n Args:\n inputs: the input shape. Must have four elements.\n filter_shape: the convolution filter shape. Must have four elements.\n stride: the strides along height and width, respectively.\n padding: 'SAME' or 'VALID'.\n\n Returns:\n Number of multiplications and additions.\n \"\"\"\n b, h, w = inputs[0], inputs[1], inputs[2]\n fh, fw, ic, oc = filter_shape\n sh, sw = stride\n\n def _CeilDiv(x, y):\n return tf.math.floordiv(x + y - 1, y)\n\n if padding == 'SAME':\n oh = _CeilDiv(h, sh)\n ow = _CeilDiv(w, sw)\n else:\n assert padding == 'VALID'\n oh = _CeilDiv(h - fh + 1, sh)\n ow = _CeilDiv(w - fw + 1, sw)\n # Mul/add counts as 2 flops.\n return (tf.cast(b * oh * ow, tf.int64) *\n tf.cast(fh * fw * ic * oc, tf.int64) * 2)\n\n\nclass Conv2DLayerNoPadding(base_layer.BaseLayer):\n \"\"\"2-D Convolution layer w/o padding.\n\n TODO(laurenzo): Dedup in favor of SeparableConv2DLayer where possible.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'filter_shape', (0, 0, 0, 0),\n 'Filter shape. Must be a sequence of length 4. Elements are in'\n ' the order of height (time), width (frequency), in_channel,'\n ' out_channel. ')\n p.Define(\n 'filter_stride', (0, 0),\n 'Filter stride to use. Must be a pair of ints. The first int'\n ' specifies the stride on the height dimension. The second int'\n ' specifies the stride on the width dimension.')\n p.Define(\n 'dilations', (1, 1), ' An optional list of ints. Defaults to [1, 1]. '\n '1-D tensor of length 2. The dilation factor for each dimension '\n 'of input. 
If set to k > 1, there will be k-1 skipped cells '\n 'between each filter element on that dimension.')\n p.Define('padding', 'SAME', 'SAME|VALID')\n\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n assert p.padding in ['SAME', 'VALID']\n assert len(p.filter_shape) == 4\n assert len(p.filter_stride) == 2\n assert len(p.dilations) == 2\n assert all(x > 0 for x in p.filter_stride)\n\n def _CreateLayerVariables(self):\n super()._CreateLayerVariables()\n p = self.params\n w_pc = py_utils.WeightParams(\n shape=p.filter_shape,\n init=p.params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n self.CreateVariable('w', w_pc)\n\n def FProp(self, theta, x):\n \"\"\"Apply convolution to inputs.\n\n Args:\n theta: A NestedMap object containing weights' values of this layer and its\n children layers.\n x: The inputs tensor. It is expected to be of shape [batch, height, width,\n channel].\n\n Returns:\n Convolution output.\n \"\"\"\n p = self.params\n with tf.name_scope(p.name):\n computation_cost.Add(\n self, 'flops',\n Conv2DFlops(\n tf.shape(x),\n filter_shape=symbolic.EvalExpr(symbolic.TENSOR_VALUES,\n p.filter_shape),\n stride=p.filter_stride,\n padding=p.padding))\n return tf.nn.conv2d(\n input=x,\n filters=theta.w,\n strides=[1, p.filter_stride[0], p.filter_stride[1], 1],\n padding=p.padding,\n dilations=[1, p.dilations[0], p.dilations[1], 1],\n data_format='NHWC')\n\n @classmethod\n def FPropMeta(cls, p, inputs):\n py_utils.CheckShapes((inputs,))\n b, h, w, c = inputs\n fh, fw, ic, oc = p.filter_shape\n assert ic == c\n sh, sw = p.filter_stride\n if p.padding == 'SAME':\n oh = sympy.ceiling(h / sh)\n ow = sympy.ceiling(w / sw)\n else:\n oh = sympy.ceiling((h - fh + 1) / sh)\n ow = sympy.ceiling((w - fw + 1) / sw)\n flops = b * oh * ow * fh * fw * ic * oc * 2 # mul/add counts as 2 flop.\n outputs = tshape.Shape([b, oh, ow, oc])\n return py_utils.NestedMap(flops=flops, out_shapes=(outputs,))\n\n\nclass FetchLayer(base_layer.BaseLayer):\n \"\"\"A layer facilitating fetching activations and their gradients.\"\"\"\n\n def __init__(self, params):\n super().__init__(params)\n assert self.params.name\n self._activations = None\n self._gradients = None\n\n @classmethod\n def FPropMeta(cls, params, *args):\n return py_utils.NestedMap(flops=0, out_shapes=args)\n\n def _ReturnSingleValueOrList(self, lst):\n assert lst is not None\n assert isinstance(lst, list)\n return lst if len(lst) > 1 else lst[0]\n\n @property\n def activation(self):\n return self._ReturnSingleValueOrList(self._activations)\n\n @property\n def gradient(self):\n return self._ReturnSingleValueOrList(self._gradients)\n\n def FProp(self, theta, *args):\n del theta\n num = len(args)\n self._activations = [None] * num\n self._gradients = [None] * num\n\n for i, v in enumerate(args):\n\n def FetchBak(xs, ys, dys, index=i):\n del xs, ys\n self._gradients[index] = dys\n return dys\n\n def FetchFwd(x):\n return x\n\n self._activations[i] = py_utils.CallDefun(FetchFwd, v, bak=FetchBak)\n\n return tuple(self._activations) if num > 1 else self._activations[0]\n\n\nclass GluLayer(base_layer.BaseLayer):\n \"\"\"Gated Linear Unit.\n\n See https://arxiv.org/abs/1612.08083 for more details.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('input_dim', 0, 'Dimension of the layer input.')\n p.Define('output_dim', 0, 'Dimension of the layer output.')\n p.Define('ln_tpl', LayerNorm.Params(), 'Layer norm default params.')\n p.Define('dense_tpl', 
FCLayer.Params().Set(), 'Fully connected layer.')\n p.Define(\n 'activation', 'RELU',\n 'Non-linearity applied after the dense layer in the value branch.')\n p.Define('dropout_tpl', DropoutLayer.Params(), 'Dropout applied to output.')\n p.Define('apply_residual', True, 'Whether or not to add inputs to outputs.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n assert p.input_dim\n\n if p.output_dim:\n output_dim = p.output_dim\n else:\n output_dim = p.input_dim\n\n if p.apply_residual:\n assert output_dim == p.input_dim\n\n # Initialize value feed-forward layer.\n params = p.dense_tpl.Copy()\n params.name = 'value_layer'\n params.input_dim = p.input_dim\n params.activation = p.activation\n params.output_dim = output_dim\n self.CreateChild('value_layer', params)\n\n # Initialize gate feed-forward layer.\n params = p.dense_tpl.Copy()\n params.name = 'gate_layer'\n params.input_dim = p.input_dim\n params.activation = 'SIGMOID'\n params.output_dim = output_dim\n self.CreateChild('gate_layer', params)\n\n # Initialize layer norm.\n params = p.ln_tpl.Copy()\n params.name = 'layer_norm'\n params.input_dim = p.input_dim\n self.CreateChild('layer_norm', params)\n\n # Initialize dropout.\n dropout_tpl = p.dropout_tpl.Copy()\n self.CreateChild('dropout', dropout_tpl)\n\n def FProp(self, theta, inputs, paddings):\n inputs_normalized = self.layer_norm.FProp(theta.layer_norm, inputs)\n values = self.value_layer.FProp(theta.value_layer, inputs_normalized,\n tf.expand_dims(paddings, -1))\n gates = self.gate_layer.FProp(theta.gate_layer, inputs_normalized,\n tf.expand_dims(paddings, -1))\n glu_output = values * gates\n glu_output = self.dropout.FProp(theta.dropout, glu_output)\n if self.params.apply_residual:\n return inputs + glu_output\n return glu_output\n\n\nclass MultitaskAdapterLayer(base_layer.BaseLayer):\n \"\"\"Residual adapter layer for multilingual models.\n\n Residual adapters can be used to fine-tune a single model to multiple\n domains, tasks, or languages: https://arxiv.org/pdf/1902.00751.pdf\n\n Each adapter consists of a \"down\" projection to a smaller dimension followed\n by an \"up\" projection, the result of which is added back to the input\n activation. The projection weights and biases are task-specific.\n\n Whereas ResidualAdapterLayer learns and applies the parameters for a single\n task, this layer learns and applies the parameters for multiple tasks so that\n we have a single model serving the different tasks. The parameters can be\n trained for all tasks at the same time, or in one-off per-task training jobs.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_tasks', 0, 'Number of tasks.')\n p.Define('input_dim', 0, 'Dimension of the input to the adapter.')\n p.Define('bottleneck_dim', 0, 'Dimension of the bottleneck.')\n p.Define('layer_norm_tpl', LayerNorm.Params(), 'Layer norm default params.')\n p.Define(\n 'projection_params_init', None,\n 'Weight initialization for up and down projections. Only used for '\n 'weights, not biases. If None, uses default weight init, which is '\n 'typically Xavier with scale of 1.0.')\n p.Define(\n 'data_format', 'TBC', 'String(enum) specifying the input and output '\n 'data format for this layer. 
Supported formats: '\n '\"TBC\": [time, batch, input_dim] and \"BTC\": [batch, time, input_dim].')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n # Data format is either 'TBC' (time-major) or 'BTC' (batch-major).\n assert p.data_format in ('TBC', 'BTC')\n base_emb_params = EmbeddingLayer.Params().Set(\n vocab_size=p.num_tasks, max_num_shards=1)\n down_proj_w_params = base_emb_params.Copy()\n down_proj_w_params.Set(\n embedding_dim=p.input_dim * p.bottleneck_dim, name='down_proj_w')\n if p.projection_params_init:\n down_proj_w_params.params_init = p.projection_params_init\n down_proj_b_params = base_emb_params.Copy()\n down_proj_b_params.Set(embedding_dim=p.bottleneck_dim, name='down_proj_b')\n up_proj_w_params = base_emb_params.Copy()\n up_proj_w_params.Set(\n embedding_dim=p.bottleneck_dim * p.input_dim, name='up_proj_w')\n if p.projection_params_init:\n up_proj_w_params.params_init = p.projection_params_init\n up_proj_b_params = base_emb_params.Copy()\n up_proj_b_params.Set(embedding_dim=p.input_dim, name='up_proj_b')\n\n self.CreateChild('down_proj_w', down_proj_w_params)\n self.CreateChild('down_proj_b', down_proj_b_params)\n self.CreateChild('up_proj_w', up_proj_w_params)\n self.CreateChild('up_proj_b', up_proj_b_params)\n params = p.layer_norm_tpl.Copy()\n params.name = 'adapter_ln'\n params.input_dim = p.input_dim\n self.CreateChild('layer_norm', params)\n\n def FProp(self, theta, inputs, tasks):\n \"\"\"Fprop for multitask adapter.\n\n Args:\n theta: A NestedMap object containing weights' values of this layer and its\n children layers.\n inputs: A tensor containing the activations from the previous layer. For\n 'TBC', the shape is [time, batch, input_dim] and for 'BTC', it's [batch,\n time, input_dim].\n tasks: An int32 tensor containing the task ID for each input. If 'tasks'\n is of rank 2, we assume it to be of shape [time, batch] if 'BTC' and\n [batch, time] if 'TBC', indicating a different task for each timestep.\n In this case we look up adapter params for each timestep. If 'tasks' is\n of rank 1, we assume it to be of shape [batch], indicating a single task\n for all timesteps of a sequence. This latter setup uses substantially\n less memory and is generally preferred.\n\n Returns:\n A tensor containing the adapted activations with shape\n [time, batch, input_dim] for 'TBC' and [batch, time, input_dim] for 'BTC'.\n \"\"\"\n p = self.params\n inputs_shape = tf.shape(inputs)\n per_timestep_task = (tasks.shape.ndims == 2)\n batch_index = 1 if p.data_format == 'TBC' else 0\n time_index = 1 - batch_index\n inputs = py_utils.with_dependencies(\n [\n # Checks that inputs has 3 dimensions, last is hidden dim.\n py_utils.assert_shape_match(inputs_shape, [-1, -1, p.input_dim]),\n # Checks that inputs and tasks have same batch dimension.\n py_utils.assert_shape_match([inputs_shape[batch_index]], [\n tf.shape(tasks)[batch_index]\n if per_timestep_task else tf.shape(tasks)[0]\n ])\n ],\n inputs)\n\n # To support different task for each timetstep, flatten inputs and\n # tasks. 
Below, 'batch' now refers to flattened batch size, time * batch.\n if per_timestep_task:\n tasks = py_utils.with_dependencies(\n [\n # Checks that inputs and tasks have same time dimension.\n py_utils.assert_shape_match(inputs_shape[:1],\n tf.shape(tasks)[:1])\n ],\n tasks)\n tasks = tf.reshape(tasks, [-1])\n if p.data_format == 'TBC':\n inputs = tf.reshape(inputs, [1, -1, p.input_dim])\n else:\n inputs = tf.reshape(inputs, [-1, 1, p.input_dim])\n\n # Lookup all weights and biases\n # [batch] -> [batch, hidden * k] -> [batch, hidden, k]\n down_weights = tf.reshape(\n self.down_proj_w.EmbLookup(theta.down_proj_w, tasks),\n [-1, p.input_dim, p.bottleneck_dim])\n # [batch] -> [batch, k] -> [1, batch, k] if 'TBC' else [batch, 1, k]\n down_biases = tf.expand_dims(\n self.down_proj_b.EmbLookup(theta.down_proj_b, tasks), time_index)\n # [batch] -> [batch, k * hidden] -> [batch, k, hidden]\n up_weights = tf.reshape(\n self.up_proj_w.EmbLookup(theta.up_proj_w, tasks),\n [-1, p.bottleneck_dim, p.input_dim])\n # [batch] -> [batch, h] -> [1, batch, h] if 'TBC' else [batch, 1, h]\n up_biases = tf.expand_dims(\n self.up_proj_b.EmbLookup(theta.up_proj_b, tasks), time_index)\n\n # Layer norm -> down-projection -> non-linearity -> up-projection\n norm_inputs = self.layer_norm.FProp(theta.layer_norm, inputs)\n # If per_timestep_task, t = 1, b = time * batch.\n # Otherwise, t = time, b = batch.\n if p.data_format == 'TBC':\n down_projected = tf.einsum('tbh,bhk->tbk', norm_inputs, down_weights)\n else:\n down_projected = tf.einsum('bth,bhk->btk', norm_inputs, down_weights)\n down_projected += down_biases\n down_projected = tf.nn.relu(down_projected)\n if p.data_format == 'TBC':\n up_projected = tf.einsum('tbk,bkh->tbh', down_projected, up_weights)\n else:\n up_projected = tf.einsum('btk,bkh->bth', down_projected, up_weights)\n up_projected += up_biases\n output = inputs + up_projected\n\n # Unflatten output:\n # for 'TBC': [1, time * batch, hidden] -> [time, batch, hidden]\n # for 'BTC': [1, batch * time, hidden] -> [batch, time, hidden]\n if per_timestep_task:\n output = tf.reshape(output, inputs_shape)\n return output\n\n\nclass CCTGatingNetwork(quant_utils.QuantizableLayer):\n \"\"\"A gating network that is continous for training and discrete for eval.\n\n Based on the gating network from https://arxiv.org/abs/2002.07106.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('input_dim', 0, 'Depth of the input to the network.')\n p.Define('hidden_layer_dim', 0, 'Depth of the hidden layer outputs.')\n p.Define('num_outputs', 0, 'Number of scalar gate outputs.')\n p.Define('noise_std', 1.0, 'Standard deviation for gating noise.')\n p.Define('noise_warmup_steps', 1.0, 'Steps to full noise.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n params = schedule.PolynomialSchedule.Params()\n params.start = (0, 0.0)\n params.limit = (p.noise_warmup_steps, p.noise_std)\n self.CreateChild('noise_std', params)\n\n params = FeedForwardNet.Params()\n params.name = 'gating_layer'\n params.input_dim = p.input_dim\n params.activation = ['RELU', 'NONE']\n params.hidden_layer_dims = [p.hidden_layer_dim, p.num_outputs]\n self.CreateChild('gatingfflayer', params)\n\n def FProp(self, theta, inputs, paddings=None):\n p = self.params\n p_c = self.gatingfflayer.FProp(theta.gatingfflayer, inputs, paddings)\n if self.do_eval:\n ones = tf.ones(tf.shape(p_c), py_utils.FPropDtype(p))\n zeros = tf.zeros(tf.shape(p_c), py_utils.FPropDtype(p))\n p_c = tf.where(\n 
tf.greater_equal(p_c, tf.constant(0.0, dtype=py_utils.FPropDtype(p))),\n ones, zeros)\n else:\n noise_std = self.noise_std.FProp(theta.noise_std, theta.global_step)\n noise = py_utils.DeterministicVN(\n p,\n py_utils.GenerateStepSeedPair(p, theta.global_step),\n tf.shape(p_c),\n std=noise_std)\n p_c = tf.nn.sigmoid(p_c + noise)\n return p_c\n\n @classmethod\n def FPropMeta(cls, p, inputs, paddings=None):\n py_utils.CheckShapes((inputs,))\n assert inputs[-1] == p.input_dim\n flops = 0\n in_dim = inputs[-1]\n other_dims = inputs.num_elements() / in_dim\n flops = 5 * other_dims * in_dim * p.hidden_layer_dim\n flops = 5 * other_dims * p.num_outputs * p.hidden_layer_dim\n out_shape = tshape.Shape(inputs[:-1] + [symbolic.ToStatic(p.num_outputs)])\n return py_utils.NestedMap(flops=flops, out_shapes=(out_shape,))\n\n\nclass CondScaleShiftFFNLayer(base_layer.BaseLayer):\n \"\"\"Feature Modulation layer.\n\n https://distill.pub/2018/feature-wise-transformations/\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('input_dim', 0, 'Depth of the input.')\n p.Define('output_dim', 0, 'Depth of the output.')\n p.Define('ffn', FeedForwardNet.Params(), 'Projection layer params')\n p.Define('scale_fn', 'NONE',\n 'The activation function to use for scale output')\n p.Define('shift_fn', 'NONE',\n 'The activation function to use for shift output')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n\n output_dim = p.output_dim * 2 # 1st split for shift, 2nd split for scale\n params_ffn = p.ffn.Copy().Set(\n input_dim=p.input_dim, name='{}_ffn'.format(p.name))\n params_fcout = FCLayer.Params().Copy().Set(\n input_dim=params_ffn.hidden_layer_dims[-1],\n output_dim=output_dim,\n activation='NONE',\n name='{}_fcout'.format(p.name))\n self.CreateChild('ffn', params_ffn)\n self.CreateChild('fcout', params_fcout)\n\n def FProp(self, theta, inputs, paddings=None):\n \"\"\"Calculate scale shift and modify input.\n\n Args:\n theta: params.\n inputs: The input tensor. Shaped [..., input_dim].\n paddings: The input padding tensors.\n\n Returns:\n Output after calculating shift and scale (2 tensors).\n Shaped [..., output_dim].\n \"\"\"\n p = self.params\n\n ffn_output = self.ffn.FProp(theta.ffn, inputs, paddings)\n fcout_output = self.fcout.FProp(theta.fcout, ffn_output, paddings)\n scale_output, shift_output = tf.split(\n fcout_output, num_or_size_splits=2, axis=-1)\n\n def OpWrapper(name, tensor):\n \"\"\"Wrapper for retrieve tf operations.\"\"\"\n if activations.IsSupported(name):\n op = activations.GetFn(name)\n else:\n if name == 'EXP':\n op = tf.exp\n elif name == 'NONE':\n op = tf.identity\n else:\n raise ValueError()\n return op(tensor)\n\n scale_output = OpWrapper(p.scale_fn, scale_output)\n shift_output = OpWrapper(p.shift_fn, shift_output)\n return scale_output, shift_output\n"
] | [
[
"numpy.linspace",
"numpy.asarray",
"tensorflow.python.ops.inplace_ops.empty",
"numpy.isposinf",
"numpy.isneginf",
"numpy.prod",
"numpy.array"
],
[
"numpy.min",
"tensorflow.python.ops.inplace_ops.alias_inplace_update",
"numpy.tile",
"numpy.max",
"tensorflow.python.ops.inplace_ops.alias_inplace_add",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
}
] |
thegalang/lime | [
"a4cd83e20c838c782728c02f07e21ab01d17f3fa"
] | [
"lime/lime_tabular.py"
] | [
"\"\"\"\nFunctions for explaining classifiers that use tabular data (matrices).\n\"\"\"\nimport collections\nimport copy\nfrom functools import partial\nimport json\nimport warnings\n\nimport numpy as np\nimport sklearn\nimport sklearn.preprocessing\nfrom sklearn.utils import check_random_state\n\nfrom lime.discretize import QuartileDiscretizer\nfrom lime.discretize import DecileDiscretizer\nfrom lime.discretize import EntropyDiscretizer\nfrom lime.discretize import BaseDiscretizer\nfrom . import explanation\nfrom . import lime_base\n\n\nclass TableDomainMapper(explanation.DomainMapper):\n \"\"\"Maps feature ids to names, generates table views, etc\"\"\"\n\n def __init__(self, feature_names, feature_values, scaled_row,\n categorical_features, discretized_feature_names=None):\n \"\"\"Init.\n\n Args:\n feature_names: list of feature names, in order\n feature_values: list of strings with the values of the original row\n scaled_row: scaled row\n categorical_features: list of categorical features ids (ints)\n \"\"\"\n self.exp_feature_names = feature_names\n self.discretized_feature_names = discretized_feature_names\n self.feature_names = feature_names\n self.feature_values = feature_values\n self.scaled_row = scaled_row\n self.all_categorical = len(categorical_features) == len(scaled_row)\n self.categorical_features = categorical_features\n\n def map_exp_ids(self, exp):\n \"\"\"Maps ids to feature names.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n\n Returns:\n list of tuples (feature_name, weight)\n \"\"\"\n names = self.exp_feature_names\n if self.discretized_feature_names is not None:\n names = self.discretized_feature_names\n return [(names[x[0]], x[1]) for x in exp]\n\n def visualize_instance_html(self,\n exp,\n label,\n div_name,\n exp_object_name,\n show_table=True,\n show_all=False):\n \"\"\"Shows the current example in a table format.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n label: label id (integer)\n div_name: name of div object to be used for rendering(in js)\n exp_object_name: name of js explanation object\n show_table: if False, don't show table visualization.\n show_all: if True, show zero-weighted features in the table.\n \"\"\"\n if not show_table:\n return ''\n weights = [0] * len(self.feature_names)\n for x in exp:\n weights[x[0]] = x[1]\n out_list = list(zip(self.exp_feature_names,\n self.feature_values,\n weights))\n if not show_all:\n out_list = [out_list[x[0]] for x in exp]\n ret = u'''\n %s.show_raw_tabular(%s, %d, %s);\n ''' % (exp_object_name, json.dumps(out_list, ensure_ascii=False), label, div_name)\n return ret\n\n\nclass LimeTabularExplainer(object):\n \"\"\"Explains predictions on tabular (i.e. matrix) data.\n For numerical features, perturb them by sampling from a Normal(0,1) and\n doing the inverse operation of mean-centering and scaling, according to the\n means and stds in the training data. 
For categorical features, perturb by\n sampling according to the training distribution, and making a binary\n feature that is 1 when the value is the same as the instance being\n explained.\"\"\"\n\n def __init__(self,\n training_data,\n mode=\"classification\",\n training_labels=None,\n feature_names=None,\n categorical_features=None,\n categorical_names=None,\n kernel_width=None,\n kernel=None,\n verbose=False,\n class_names=None,\n feature_selection='auto',\n discretize_continuous=True,\n discretizer='quartile',\n sample_around_instance=False,\n random_state=None):\n \"\"\"Init function.\n\n Args:\n training_data: numpy 2d array\n mode: \"classification\" or \"regression\"\n training_labels: labels for training data. Not required, but may be\n used by discretizer.\n feature_names: list of names (strings) corresponding to the columns\n in the training data.\n categorical_features: list of indices (ints) corresponding to the\n categorical columns. Everything else will be considered\n continuous. Values in these columns MUST be integers.\n categorical_names: map from int to list of names, where\n categorical_names[x][y] represents the name of the yth value of\n column x.\n kernel_width: kernel width for the exponential kernel.\n If None, defaults to sqrt (number of columns) * 0.75\n kernel: similarity kernel that takes euclidean distances and kernel\n width as input and outputs weights in (0,1). If None, defaults to\n an exponential kernel.\n verbose: if true, print local prediction values from linear model\n class_names: list of class names, ordered according to whatever the\n classifier is using. If not present, class names will be '0',\n '1', ...\n feature_selection: feature selection method. can be\n 'forward_selection', 'lasso_path', 'none' or 'auto'.\n See function 'explain_instance_with_data' in lime_base.py for\n details on what each of the options does.\n discretize_continuous: if True, all non-categorical features will\n be discretized into quartiles.\n discretizer: only matters if discretize_continuous is True. Options\n are 'quartile', 'decile', 'entropy' or a BaseDiscretizer\n instance.\n sample_around_instance: if True, will sample continuous features\n in perturbed samples from a normal centered at the instance\n being explained. Otherwise, the normal is centered on the mean\n of the feature data.\n random_state: an integer or numpy.RandomState that will be used to\n generate random numbers. 
If None, the random state will be\n initialized using the internal numpy seed.\n \"\"\"\n self.random_state = check_random_state(random_state)\n self.mode = mode\n self.categorical_names = categorical_names or {}\n self.sample_around_instance = sample_around_instance\n\n if categorical_features is None:\n categorical_features = []\n if feature_names is None:\n feature_names = [str(i) for i in range(training_data.shape[1])]\n\n self.categorical_features = list(categorical_features)\n self.feature_names = list(feature_names)\n\n self.discretizer = None\n if discretize_continuous:\n if discretizer == 'quartile':\n self.discretizer = QuartileDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels)\n elif discretizer == 'decile':\n self.discretizer = DecileDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels)\n elif discretizer == 'entropy':\n self.discretizer = EntropyDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels)\n elif isinstance(discretizer, BaseDiscretizer):\n self.discretizer = discretizer\n else:\n raise ValueError('''Discretizer must be 'quartile',''' +\n ''' 'decile', 'entropy' or a''' +\n ''' BaseDiscretizer instance''')\n self.categorical_features = list(range(training_data.shape[1]))\n discretized_training_data = self.discretizer.discretize(\n training_data)\n\n if kernel_width is None:\n kernel_width = np.sqrt(training_data.shape[1]) * .75\n kernel_width = float(kernel_width)\n\n if kernel is None:\n def kernel(d, kernel_width):\n return np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))\n\n kernel_fn = partial(kernel, kernel_width=kernel_width)\n\n self.feature_selection = feature_selection\n self.base = lime_base.LimeBase(kernel_fn, verbose, random_state=self.random_state)\n self.scaler = None\n self.class_names = class_names\n self.scaler = sklearn.preprocessing.StandardScaler(with_mean=False)\n self.scaler.fit(training_data)\n self.feature_values = {}\n self.feature_frequencies = {}\n\n for feature in self.categorical_features:\n if self.discretizer is not None:\n column = discretized_training_data[:, feature]\n else:\n column = training_data[:, feature]\n\n feature_count = collections.Counter(column)\n values, frequencies = map(list, zip(*(sorted(feature_count.items()))))\n\n self.feature_values[feature] = values\n self.feature_frequencies[feature] = (np.array(frequencies) /\n float(sum(frequencies)))\n self.scaler.mean_[feature] = 0\n self.scaler.scale_[feature] = 1\n\n @staticmethod\n def convert_and_round(values):\n return ['%.2f' % v for v in values]\n\n def explain_instance(self,\n data_row,\n predict_fn,\n labels=(1,),\n top_labels=None,\n num_features=10,\n num_samples=5000,\n distance_metric='euclidean',\n model_regressor=None):\n \"\"\"Generates explanations for a prediction.\n\n First, we generate neighborhood data by randomly perturbing features\n from the instance (see __data_inverse). We then learn locally weighted\n linear models on this neighborhood data to explain each of the classes\n in an interpretable way (see lime_base.py).\n\n Args:\n data_row: 1d numpy array, corresponding to a row\n predict_fn: prediction function. For classifiers, this should be a\n function that takes a numpy array and outputs prediction\n probabilities. For regressors, this takes a numpy array and\n returns the predictions. For ScikitClassifiers, this is\n `classifier.predict_proba()`. For ScikitRegressors, this\n is `regressor.predict()`. 
The prediction function needs to work\n on multiple feature vectors (the vectors randomly perturbed\n from the data_row).\n labels: iterable with labels to be explained.\n top_labels: if not None, ignore labels and produce explanations for\n the K labels with highest prediction probabilities, where K is\n this parameter.\n num_features: maximum number of features present in explanation\n num_samples: size of the neighborhood to learn the linear model\n distance_metric: the distance metric to use for weights.\n model_regressor: sklearn regressor to use in explanation. Defaults\n to Ridge regression in LimeBase. Must have model_regressor.coef_\n and 'sample_weight' as a parameter to model_regressor.fit()\n\n Returns:\n An Explanation object (see explanation.py) with the corresponding\n explanations.\n \"\"\"\n data, inverse = self.__data_inverse(data_row, num_samples)\n scaled_data = (data - self.scaler.mean_) / self.scaler.scale_\n\n distances = sklearn.metrics.pairwise_distances(\n scaled_data,\n scaled_data[0].reshape(1, -1),\n metric=distance_metric\n ).ravel()\n\n yss = predict_fn(inverse)\n\n # for classification, the model needs to provide a list of tuples - classes\n # along with prediction probabilities\n if self.mode == \"classification\":\n if len(yss.shape) == 1:\n raise NotImplementedError(\"LIME does not currently support \"\n \"classifier models without probability \"\n \"scores. If this conflicts with your \"\n \"use case, please let us know: \"\n \"https://github.com/datascienceinc/lime/issues/16\")\n elif len(yss.shape) == 2:\n if self.class_names is None:\n self.class_names = [str(x) for x in range(yss[0].shape[0])]\n else:\n self.class_names = list(self.class_names)\n if not np.allclose(yss.sum(axis=1), 1.0):\n warnings.warn(\"\"\"\n Prediction probabilties do not sum to 1, and\n thus does not constitute a probability space.\n Check that you classifier outputs probabilities\n (Not log probabilities, or actual class predictions).\n \"\"\")\n else:\n raise ValueError(\"Your model outputs \"\n \"arrays with {} dimensions\".format(len(yss.shape)))\n\n # for regression, the output should be a one-dimensional array of predictions\n else:\n try:\n assert isinstance(yss, np.ndarray) and len(yss.shape) == 1\n except AssertionError:\n raise ValueError(\"Your model needs to output single-dimensional \\\n numpyarrays, not arrays of {} dimensions\".format(yss.shape))\n\n predicted_value = yss[0]\n min_y = min(yss)\n max_y = max(yss)\n\n # add a dimension to be compatible with downstream machinery\n yss = yss[:, np.newaxis]\n\n feature_names = copy.deepcopy(self.feature_names)\n if feature_names is None:\n feature_names = [str(x) for x in range(data_row.shape[0])]\n\n values = self.convert_and_round(data_row)\n\n for i in self.categorical_features:\n if self.discretizer is not None and i in self.discretizer.lambdas:\n continue\n name = int(data_row[i])\n if i in self.categorical_names:\n name = self.categorical_names[i][name]\n feature_names[i] = '%s=%s' % (feature_names[i], name)\n values[i] = 'True'\n categorical_features = self.categorical_features\n\n discretized_feature_names = None\n if self.discretizer is not None:\n categorical_features = range(data.shape[1])\n discretized_instance = self.discretizer.discretize(data_row)\n discretized_feature_names = copy.deepcopy(feature_names)\n for f in self.discretizer.names:\n discretized_feature_names[f] = self.discretizer.names[f][int(\n discretized_instance[f])]\n\n domain_mapper = TableDomainMapper(feature_names,\n values,\n 
scaled_data[0],\n categorical_features=categorical_features,\n discretized_feature_names=discretized_feature_names)\n ret_exp = explanation.Explanation(domain_mapper,\n mode=self.mode,\n class_names=self.class_names)\n ret_exp.scaled_data = scaled_data\n if self.mode == \"classification\":\n ret_exp.predict_proba = yss[0]\n if top_labels:\n labels = np.argsort(yss[0])[-top_labels:]\n ret_exp.top_labels = list(labels)\n ret_exp.top_labels.reverse()\n else:\n ret_exp.predicted_value = predicted_value\n ret_exp.min_value = min_y\n ret_exp.max_value = max_y\n labels = [0]\n\n for label in labels:\n (ret_exp.intercept[label],\n ret_exp.local_exp[label],\n ret_exp.score, ret_exp.local_pred) = self.base.explain_instance_with_data(\n scaled_data,\n yss,\n distances,\n label,\n num_features,\n model_regressor=model_regressor,\n feature_selection=self.feature_selection)\n\n if self.mode == \"regression\":\n ret_exp.intercept[1] = ret_exp.intercept[0]\n ret_exp.local_exp[1] = [x for x in ret_exp.local_exp[0]]\n ret_exp.local_exp[0] = [(i, -1 * j) for i, j in ret_exp.local_exp[1]]\n\n return ret_exp\n\n def __data_inverse(self,\n data_row,\n num_samples):\n \"\"\"Generates a neighborhood around a prediction.\n\n For numerical features, perturb them by sampling from a Normal(0,1) and\n doing the inverse operation of mean-centering and scaling, according to\n the means and stds in the training data. For categorical features,\n perturb by sampling according to the training distribution, and making\n a binary feature that is 1 when the value is the same as the instance\n being explained.\n\n Args:\n data_row: 1d numpy array, corresponding to a row\n num_samples: size of the neighborhood to learn the linear model\n\n Returns:\n A tuple (data, inverse), where:\n data: dense num_samples * K matrix, where categorical features\n are encoded with either 0 (not equal to the corresponding value\n in data_row) or 1. The first row is the original instance.\n inverse: same as data, except the categorical features are not\n binary, but categorical (as the original data)\n \"\"\"\n data = np.zeros((num_samples, data_row.shape[0]))\n categorical_features = range(data_row.shape[0])\n if self.discretizer is None:\n data = self.random_state.normal(\n 0, 1, num_samples * data_row.shape[0]).reshape(\n num_samples, data_row.shape[0])\n if self.sample_around_instance:\n data = data * self.scaler.scale_ + data_row\n else:\n data = data * self.scaler.scale_ + self.scaler.mean_\n categorical_features = self.categorical_features\n first_row = data_row\n else:\n first_row = self.discretizer.discretize(data_row)\n data[0] = data_row.copy()\n inverse = data.copy()\n for column in categorical_features:\n values = self.feature_values[column]\n freqs = self.feature_frequencies[column]\n inverse_column = self.random_state.choice(values, size=num_samples,\n replace=True, p=freqs)\n binary_column = np.array([1 if x == first_row[column]\n else 0 for x in inverse_column])\n binary_column[0] = 1\n inverse_column[0] = data[0, column]\n data[:, column] = binary_column\n inverse[:, column] = inverse_column\n if self.discretizer is not None:\n inverse[1:] = self.discretizer.undiscretize(inverse[1:])\n inverse[0] = data_row\n return data, inverse\n\n\nclass RecurrentTabularExplainer(LimeTabularExplainer):\n \"\"\"\n An explainer for keras-style recurrent neural networks, where the\n input shape is (n_samples, n_timesteps, n_features).
This class\n just extends the LimeTabularExplainer class and reshapes the training\n data and feature names such that they become something like\n\n (val1_t1, val1_t2, val1_t3, ..., val2_t1, ..., valn_tn)\n\n Each of the methods that take data reshape it appropriately,\n so you can pass in the training/testing data exactly as you\n would to the recurrent neural network.\n\n \"\"\"\n\n def __init__(self, training_data, mode=\"classification\",\n training_labels=None, feature_names=None,\n categorical_features=None, categorical_names=None,\n kernel_width=None, kernel=None, verbose=False, class_names=None,\n feature_selection='auto', discretize_continuous=True,\n discretizer='quartile', random_state=None):\n \"\"\"\n Args:\n training_data: numpy 3d array with shape\n (n_samples, n_timesteps, n_features)\n mode: \"classification\" or \"regression\"\n training_labels: labels for training data. Not required, but may be\n used by discretizer.\n feature_names: list of names (strings) corresponding to the columns\n in the training data.\n categorical_features: list of indices (ints) corresponding to the\n categorical columns. Everything else will be considered\n continuous. Values in these columns MUST be integers.\n categorical_names: map from int to list of names, where\n categorical_names[x][y] represents the name of the yth value of\n column x.\n kernel_width: kernel width for the exponential kernel.\n If None, defaults to sqrt(number of columns) * 0.75\n kernel: similarity kernel that takes euclidean distances and kernel\n width as input and outputs weights in (0,1). If None, defaults to\n an exponential kernel.\n verbose: if true, print local prediction values from linear model\n class_names: list of class names, ordered according to whatever the\n classifier is using. If not present, class names will be '0',\n '1', ...\n feature_selection: feature selection method. can be\n 'forward_selection', 'lasso_path', 'none' or 'auto'.\n See function 'explain_instance_with_data' in lime_base.py for\n details on what each of the options does.\n discretize_continuous: if True, all non-categorical features will\n be discretized into quartiles.\n discretizer: only matters if discretize_continuous is True. Options\n are 'quartile', 'decile', 'entropy' or a BaseDiscretizer\n instance.\n random_state: an integer or numpy.RandomState that will be used to\n generate random numbers.
If None, the random state will be\n initialized using the internal numpy seed.\n \"\"\"\n\n # Reshape X\n n_samples, n_timesteps, n_features = training_data.shape\n training_data = np.transpose(training_data, axes=(0, 2, 1)).reshape(\n n_samples, n_timesteps * n_features)\n self.n_timesteps = n_timesteps\n self.n_features = n_features\n\n # Update the feature names\n feature_names = ['{}_t-{}'.format(n, n_timesteps - (i + 1))\n for n in feature_names for i in range(n_timesteps)]\n\n # Send off to the super class to do its magic.\n super(RecurrentTabularExplainer, self).__init__(\n training_data,\n mode=mode,\n training_labels=training_labels,\n feature_names=feature_names,\n categorical_features=categorical_features,\n categorical_names=categorical_names,\n kernel_width=kernel_width,\n kernel=kernel,\n verbose=verbose,\n class_names=class_names,\n feature_selection=feature_selection,\n discretize_continuous=discretize_continuous,\n discretizer=discretizer,\n random_state=random_state)\n\n def _make_predict_proba(self, func):\n \"\"\"\n The predict_proba method will expect 3d arrays, but we are reshaping\n them to 2D so that LIME works correctly. This wraps the function\n you give in explain_instance to first reshape the data to have\n the shape that the keras-style network expects.\n \"\"\"\n\n def predict_proba(X):\n n_samples = X.shape[0]\n new_shape = (n_samples, self.n_features, self.n_timesteps)\n X = np.transpose(X.reshape(new_shape), axes=(0, 2, 1))\n return func(X)\n\n return predict_proba\n\n def explain_instance(self, data_row, classifier_fn, labels=(1,),\n top_labels=None, num_features=10, num_samples=5000,\n distance_metric='euclidean', model_regressor=None):\n \"\"\"Generates explanations for a prediction.\n\n First, we generate neighborhood data by randomly perturbing features\n from the instance (see __data_inverse). We then learn locally weighted\n linear models on this neighborhood data to explain each of the classes\n in an interpretable way (see lime_base.py).\n\n Args:\n data_row: 2d numpy array, corresponding to a row\n classifier_fn: classifier prediction probability function, which\n takes a numpy array and outputs prediction probabilities. For\n ScikitClassifiers, this is classifier.predict_proba.\n labels: iterable with labels to be explained.\n top_labels: if not None, ignore labels and produce explanations for\n the K labels with highest prediction probabilities, where K is\n this parameter.\n num_features: maximum number of features present in explanation\n num_samples: size of the neighborhood to learn the linear model\n distance_metric: the distance metric to use for weights.\n model_regressor: sklearn regressor to use in explanation. Defaults\n to Ridge regression in LimeBase. Must have\n model_regressor.coef_ and 'sample_weight' as a parameter\n to model_regressor.fit()\n\n Returns:\n An Explanation object (see explanation.py) with the corresponding\n explanations.\n \"\"\"\n\n # Flatten input so that the normal explainer can handle it\n data_row = data_row.T.reshape(self.n_timesteps * self.n_features)\n\n # Wrap the classifier to reshape input\n classifier_fn = self._make_predict_proba(classifier_fn)\n return super(RecurrentTabularExplainer, self).explain_instance(\n data_row, classifier_fn,\n labels=labels,\n top_labels=top_labels,\n num_features=num_features,\n num_samples=num_samples,\n distance_metric=distance_metric,\n model_regressor=model_regressor)\n"
] | [
[
"numpy.sqrt",
"numpy.transpose",
"numpy.exp",
"numpy.argsort",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.zeros",
"sklearn.utils.check_random_state"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
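A note on the RecurrentTabularExplainer row above: the (n_samples, n_timesteps, n_features) input is flattened feature-major before LIME sees it, and _make_predict_proba inverts that flattening before calling the wrapped model. A minimal numpy round-trip sketch of the same two calls (the toy dimensions are assumptions, not from the source):

import numpy as np

n_samples, n_timesteps, n_features = 2, 3, 4
X = np.arange(n_samples * n_timesteps * n_features).reshape(
    n_samples, n_timesteps, n_features)

# Flatten as in RecurrentTabularExplainer.__init__:
# (val1_t1, ..., val1_tT, val2_t1, ...) per sample.
flat = np.transpose(X, axes=(0, 2, 1)).reshape(
    n_samples, n_timesteps * n_features)

# Restore as in _make_predict_proba before calling the wrapped model.
restored = np.transpose(
    flat.reshape(n_samples, n_features, n_timesteps), axes=(0, 2, 1))

assert np.array_equal(X, restored)  # the round trip is lossless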
OllieBoyne/dog-dynamics | [
"c472f984cb04e6dea932be6a42f4daaf174fb44c"
] | [
"dynamics/dynamics.py"
] | [
"\"\"\"DEFINES THE INVERSEDYNAMICS SOLVER, A Solver for solving the joint based model of a dog.\"\"\"\n\nfrom scipy import optimize, signal\n\nfrom data.data_loader import C3DData, load_force_plate_data, ForcePlateData, SMALData, get_delay_between, DataSources, \\\n\tpath_join\nfrom vis.utils import *\nfrom vis import visualisations\nfrom dynamics.footfall_detector import FootfallDetector\nfrom tqdm import tqdm\n\n# pure constants (no optimisation needed)\ng = 9.81\nfreq_forceplate = 100 # Hz\nfoot_joint_labels = [\"front left\", \"front right\", \"rear left\", \"rear right\"]\nfoot_joint_indices = [0, 9, 23, 20] # for set 2 3r3\n\n\nclass Model:\n\t\"\"\"ID Model, with all parameters derived/optimised\"\"\"\n\n\tdef __init__(self):\n\t\t# CONSTANTS\n\t\tself.paws = {}\n\t\tself.bone_density = 1950 # Estimate - needs refining! From paper: Development of a neuromusculoskeletal computer model in a chondrodystrophic dog.\n\t\tself.muscle_density = 1060 # From above\n\n\t\t# params to optimise\n\t\tself.bone_length_definitions = {\n\t\t\t\"normal\": lambda l: dict(inner_radius=0.01, outer_radius=0.05, displacement=0),\n\t\t\t\"body\": lambda l: dict(inner_radius=l / 20, outer_radius=l / 7, displacement=l / 4 - l / 20), }\n\n\t\t# Paw parameters. All scaled to be in standard form - exponent in separate dict.\n\t\tself.paw_params_normalised = {\n\t\t\t\"L0_front\": 6.9, # 6.9 # in .1mm\n\t\t\t\"L0_rear\": 6.9, # in .1mm\n\t\t\t\"k_front\": 3.42 * .18, # in kN/m\n\t\t\t\"k_rear\": 2.0 * .21, # in kN/m\n\t\t\t\"c_front\": 20,\n\t\t\t\"c_rear\": 20,\n\t\t\t\"k_rear_prop\": 0.85, # k = k_rear * m **.85\n\t\t\t\"frame_delay\": 0 # Used for analysis of paw treadmill forces. Not used for normal ID solver\n\t\t}\n\t\tself.paw_exponents = {\n\t\t\t\"L0_front\": -4,\n\t\t\t\"L0_rear\": -4,\n\t\t\t\"k_front\": 3,\n\t\t\t\"k_rear\": 3,\n\t\t\t\"c_front\": 0,\n\t\t\t\"c_rear\": 0,\n\t\t\t\"k_rear_prop\": 0,\n\t\t\t\"frame_delay\": 0\n\t\t}\n\n\t\tself.calc_paw_params()\n\n\t\tself.freq_par_data = 200\n\n\t\t# weightings used in dynamics calculations\n\t\tself.equation_weighting = {\n\t\t\t\"Inertial\": 2,\n\t\t\t\"Rotational\": 2,\n\t\t\t\"Leg spring\": 0.5,\n\t\t\t\"Paw spring\": 1,\n\t\t}\n\n\tdef calc_paw_params(self):\n\t\t\"\"\"Calculates paw parameters (separate function for optimisation purposes)\"\"\"\n\t\tfor param, val in self.paw_params_normalised.items():\n\t\t\tself.paws[param] = val * 10 ** (self.paw_exponents[param])\n\n\tdef edit_paw_param(self, param, val):\n\t\t\"\"\"Edit paw parameter (separate for optimisation purposes)\"\"\"\n\t\tself.paw_params_normalised[param] = val\n\t\tself.calc_paw_params()\n\n\nmodel = Model()\n\n\ndef time_deriv(X, dt):\n\t\"\"\"Finds the time derivative of a given series of data.\n\tAlways treats the first dimension as time - works for any number of dimensions (n_frames, M, N, O, ...).\n\tFor all except first and last val, calcs difference over 2 timesteps\"\"\"\n\n\tdiff = np.zeros_like(X)\n\n\tdiff[0] = X[1] - X[0]\n\tdiff[1:-1] = (X[2:] - X[:-2]) / 2\n\tdiff[-1] = X[-1] - X[-2]\n\n\treturn diff * 1 / dt\n\n\ndef nth_time_deriv(X, dt, n=2):\n\t\"\"\"Recursively get the nth time derivative\"\"\"\n\n\tif n == 1:\n\t\treturn time_deriv(X, dt)\n\telse:\n\t\treturn time_deriv(nth_time_deriv(X, dt, n=n - 1), dt)\n\n\ndef get_principal_axes(vector=Vector(1, 0, 0), cardinal=np.identity(3)):\n\t\"\"\"Given a vector, devise a basis of principle axis with any two perpendicular vectors (for application of an\n\taxisymmetric object - cylinder) \"\"\"\n\n\ti, j, k = 
cardinal\n\tK = vector.unit()\n\t# Now find any two perp vectors to K\n\tif not K.is_parallel(i):\n\t\tI = K.cross(i).unit()\n\t\tJ = K.cross(I).unit()\n\telse:\n\t\tI = K.cross(j).unit()\n\t\tJ = K.cross(I).unit()\n\n\treturn np.array([I, J, K])\n\n\ndef I_cylinder(density, length, radius):\n\tmass = density * np.pi * (radius ** 2) * length\n\tIxx, Izz = (length ** 2) / 12 + (radius ** 2) / 4, radius ** 2 / 2\n\treturn mass * np.diag([Ixx, Ixx, Izz])\n\n\nclass DoubleCylinder:\n\t\"\"\"An object comprised of a cylinder of given length between two end points, of radius inner_radius and density bone_density,\n\tand an outer cylinder that does NOT share the same central axis, of radius outer_radius, displaced by a distance <displacement> normally from the centerline.\n\n\tCylinder is defined with the centerline vertical (z direction), and the displacement always in the normal closest to the z direction downwards.\n\n\tFor InverseDynamics calculations, this object will have a start and end index, which correspond to the joint indices in which the end point data is held.\n\t\"\"\"\n\n\tdef __init__(self, start, end, length, inner_radius, outer_radius, displacement, freq=50.0, name=\"\"):\n\n\t\tself.name = name\n\t\tself.freq = freq # Frequency, in Hz\n\n\t\tself.start = start\n\t\tself.end = end\n\n\t\tself.length = length\n\t\tself.displacement = displacement\n\n\t\tif outer_radius is None: outer_radius = inner_radius\n\n\t\tself.inner_mass = model.bone_density * np.pi * inner_radius ** 2 * self.length\n\t\tself.outer_mass = model.muscle_density * np.pi * self.length * (outer_radius ** 2 - inner_radius ** 2)\n\t\tself.mass = self.inner_mass + self.outer_mass\n\n\t\tI_bone = I_cylinder(model.bone_density, length, inner_radius)\n\t\tI_muscle = I_cylinder(model.muscle_density, length, outer_radius) - I_cylinder(model.muscle_density, length,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t inner_radius)\n\n\t\t# By parallel axis theorem, add component of I due to outer radius being displaced from the centerline axis\n\t\tI_axis_displacement = np.zeros((3, 3))\n\t\tI_axis_displacement[0, 0] = self.outer_mass * displacement ** 2\n\n\t\tself.I = I_bone + I_muscle + I_axis_displacement # Inertia tensor in a reference frame in which the bone is lengthwise facing upwards\n\n\tdef get_kinematics(self, data):\n\t\t\"\"\"Given a numpy array of time, data, of shape (n_frames, 2, 3),\n\t\tgiving the position data of both ends of the cylinder over time, compute the kinematics of the cylinder\"\"\"\n\n\t\tX = self.X = np.array(data) # positions\n\t\tV = self.V = time_deriv(X, 1 / self.freq) # velocities\n\t\tA = self.A = time_deriv(V, 1 / self.freq) # accelerations\n\n\t\tself.XG = np.mean(X, axis=1) # average over X\n\t\tself.VG = np.mean(V, axis=1) # average over V\n\t\tself.AG = np.mean(A, axis=1) # average over A\n\n\t\t# Rotational\n\t\tR = self.R = [Vector(*x[1]) - Vector(*x[0]) for x in X] # Vector from bone start to end in each frame\n\t\tlocal_axes = [get_principal_axes(r) for r in R] # Get principal axes for each frame\n\n\t\t# theta_g = (n_frame, 3) of angular rotation about i, j, k for each frame\n\t\t# angular rotation about each axis is defined as 0 for the next vector in the cycle\n\t\t# i.e. 
angular rotation about i = 0 for a vector parallel to j\n\t\tzero_angles = [[0, 1, 0], [0, 0, 1], [1, 0, 0]] # definition of 'zero angle' vector for i, j, k\n\t\ttheta_g = []\n\n\t\t# Compute theta_g in local axes first, where K is the unit vector\n\t\tfor n_frame in range(len(X) - 1):\n\t\t\tlocal_ax = local_axes[n_frame]\n\t\t\t# representation as a a single rotation theta about an axis e (https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula)\n\t\t\ta = R[n_frame] # rotation from one frame...\n\t\t\tb = R[n_frame + 1] # ...to the next\n\t\t\tif np.array_equal(a, b):\n\t\t\t\ttheta_g += [[0, 0, 0]] # If no rotation, return 0\n\t\t\telse:\n\t\t\t\taxis = np.cross(a, b) / (np.linalg.norm(np.cross(a, b))) # unit vector of omega\n\t\t\t\twith np.errstate(invalid='raise'):\n\t\t\t\t\ttry:\n\t\t\t\t\t\talignment = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))\n\t\t\t\t\t\talignment = np.clip(alignment, a_min=-1,\n\t\t\t\t\t\t\t\t\t\t\ta_max=1) # clip between -1 and 1 to deal with rounding errors\n\t\t\t\t\t\tangle = np.arccos(alignment) # magnitude of theta\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint((np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))))\n\t\t\t\t\t\traise ValueError(\"INVALID ANGLE\", a, b)\n\n\t\t\t\ttheta_g += [axis * angle]\n\n\t\ttheta_g = np.array(theta_g)\n\t\tself.theta_g = signal.savgol_filter(theta_g, window_length=19, polyorder=2, axis=0)\n\t\tself.omega_g = time_deriv(self.theta_g, dt=1 / self.freq)\n\t\tself.alpha_g = time_deriv(self.omega_g, dt=1 / self.freq) # angular acceleration\n\n\t\tself.I_fixed = [la.T @ self.I @ la for la in local_axes] # compute I in fixed reference frame at each frame\n\n\tdef get_dynamics(self):\n\t\t\"\"\"Compute dynamics (F_net, Torque_net) at each frame\"\"\"\n\t\tself.F_net = [self.mass * a_g for a_g in self.AG]\n\t\tself.tau_net = [I_f @ alpha for (I_f, alpha) in zip(self.I_fixed, self.alpha_g)]\n\n\nclass Body(DoubleCylinder):\n\t\"\"\"A unique case of double cylinder, where the (multiple) joints connect at the cylindrical surface at either end.\n\tThese joint attachments are defined by an angle from the i direction normal to the centerline at initialisation.\n\n\tDynamics for the body then must be calculated using a separate set of equations. Define the body such that all\n\tjoints bones go into it rather than out of it (i.e. all input forces are positive on the body) \"\"\"\n\n\tdef __init__(self, start_joints, end_joints, all_joint_positions, **cylinder_kwaargs):\n\t\t\"\"\"From the indices given by start_joints and end_joints, identify a cylinder shape that best fits these\n\t\tpoints on either side, and create that as the cylinder. 
\"\"\"\n\n\t\tself.start_joints = start_joints\n\t\tself.end_joints = end_joints\n\n\t\tstart_pos = Vector(*np.mean(all_joint_positions[40, start_joints], axis=0))\n\t\tend_pos = Vector(*np.mean(all_joint_positions[40, end_joints], axis=0))\n\n\t\tlength = start_pos > end_pos\n\n\t\tsuper().__init__(start=None, end=None, length=length, **model.bone_length_definitions[\"body\"](length),\n\t\t\t\t\t\t **cylinder_kwaargs)\n\n\tdef get_centre_of_gravity(self, start: 'Vector', end: 'Vector'):\n\t\t\"\"\"Calculates the centre of gravity based on the displacement from the centerline.\"\"\"\n\t\tcentreline_g = 0.5 * (start + end)\n\n\t\t# to find a normal that is closest to z, find N possible equispaced normals, and see which one has the greatest .k product\n\t\tnormal = (start - end).find_normal()\n\t\tN = 20 # number of normals to consider\n\t\tall_normals = [normal.rotate_about((start - end).unit(), angle=(n * 2 * np.pi / N)) for n in range(N)]\n\n\t\tidx = np.argmax([v.dot(Vector(0, 0, -1)) for v in all_normals])\n\t\tchosen_normal = all_normals[idx] # choose most downwards normal\n\n\t\treturn centreline_g + self.displacement * chosen_normal\n\n\tdef get_kinematics(self, data):\n\t\t\"\"\"For body, data is of shape (n_frames, 2, 2, 3), where it is split by rear and front.\n\t\tSo average across rear and front to get behaviour of centerline, and then run normal get_kinematics\"\"\"\n\t\tsuper().get_kinematics(np.mean(data, axis=2))\n\n\ndef weighted_bound_least_squares(A, b, weights=None, bounds=None, **kwargs):\n\t\"\"\"Completes a least squares solve of the equation A x = b, to solve N unknowns from M equations\n\twhere A is an M x N matrix, x is an N x 1 vector, and b is an M x 1 vector.\n\tApplies weightings to each row to favour certain datapoints. weights is an M x 1 vector.\n\n\tApplies bounds where bounds is an M x 2 array. each tuple in the array gives the LB and UB for the given equation\"\"\"\n\n\tif weights is None: weights = np.ones_like(b) # If no weight given, equal weight to all\n\tw = np.array(weights)\n\n\tweighted_A, weighted_b = np.array(A) * w[:, np.newaxis], np.array(b) * w # Apply weights to A, b\n\n\ttry:\n\t\tsolve = optimize.lsq_linear(weighted_A, weighted_b, bounds=list(zip(*bounds)), tol=1e-2)\n\t\treturn solve[\"x\"]\n\n\texcept np.linalg.LinAlgError as e:\n\t\tout = f\"SVD did not converge in Lin Least Sq. 
Printing params: {A}, {b}\"\n\t\traise ArithmeticError(out)\n\n\nclass InverseDynamicsSolver:\n\t\"\"\"Through scipy optimisation, Skeleton finds a set of force data that corresponds to the correct kinematic data.\n\tTakes a skeleton, and the relevant bones and joints, and solves the set of forces that correspond to correct kinematics.\"\"\"\n\n\tdef __init__(self, joint_data, target_bones, body_joints, no_torque_joints=None, no_reaction_joints=None,\n\t\tfoot_joints=None, leg_spring_joints=None, model=Model(),\n\t\tfreq=50.0, name=\"output\", is_mocap=True):\n\n\t\tfor var in [foot_joints, leg_spring_joints, no_reaction_joints, no_torque_joints]:\n\t\t\tif var is None:\n\t\t\t\tvar = []\n\n\t\tself.name = name\n\t\tself.freq = freq\n\t\tself.n_frames, self.n_joints, _ = joint_data.shape\n\n\t\tself.model = model\n\t\tself.is_mocap = is_mocap\n\n\t\t# Preprocess joint data - basic smoothing\n\t\tif is_mocap:\n\t\t\twindow_length = self.freq // 2\n\t\telse:\n\t\t\twindow_length = 0.75 * self.freq\n\n\t\tif window_length % 2 == 0: window_length -= 1\n\n\t\tself.T = self.n_frames / self.freq\n\n\t\tself.smooth = lambda X, p=5: signal.savgol_filter(X, window_length=int(window_length), polyorder=p, axis=0)\n\n\t\tp = 5 if self.is_mocap else 2\n\n\t\tself.unsmoothed_data = joint_data # save unsmoothed data for other uses\n\t\tself.joint_pos = self.smooth(joint_data, p=p)\n\t\tself.joint_vel = time_deriv(self.joint_pos, 1 / freq)\n\t\tself.joint_accel = time_deriv(self.smooth(self.joint_vel), 1 / freq)\n\n\t\tself.foot_joints = foot_joints\n\t\tself.body_joints = body_joints\n\t\tself.get_foot_joint_from_index = {} # Identify which foot from the index\n\t\tfor fj in self.foot_joints:\n\t\t\tfor bone, (j1, j2) in target_bones.items():\n\t\t\t\tif fj in [j1, j2]:\n\t\t\t\t\tself.get_foot_joint_from_index[fj] = bone\n\n\t\tself.no_torque_joints = no_torque_joints\n\t\tself.no_reaction_joints = no_reaction_joints\n\n\t\tself.target_bones_dict = target_bones # for use in plotting\n\t\tself.target_bones = []\n\n\t\tself.total_mass = 0\n\n\t\tfor bone, (joint1, joint2) in target_bones.items():\n\t\t\t# Calculate length using the initial positions of jointA and B.\n\t\t\t# Smoothing functions can cause the issues for the first few frames, so take avg of later frames\n\n\t\t\tframes = [50, 51, 52, 53, 54, 55, 56]\n\t\t\tn_averaging = len(frames)\n\t\t\tlength = 0\n\n\t\t\tfor frame in frames:\n\t\t\t\tposA = Vector(*self.joint_pos[frame, joint1])\n\t\t\t\tposB = Vector(*self.joint_pos[frame, joint2])\n\n\t\t\t\tif posA.length() == 0 or posB.length() == 0:\n\t\t\t\t\tn_averaging -= 1\n\t\t\t\telse:\n\t\t\t\t\tlength += posA > posB\n\n\t\t\tlength = length / n_averaging # avg of all the frames data taken from\n\n\t\t\tif length == 0:\n\t\t\t\tprint(f\"Warning: Error in calculating length of '{bone}'\")\n\t\t\t\tlength = 0.01\n\n\t\t\tb = DoubleCylinder(start=joint1, end=joint2, length=length, name=bone, freq=freq,\n\t\t\t\t\t\t\t **self.model.bone_length_definitions[\"normal\"](length))\n\n\t\t\tself.target_bones.append(b) # add bone to list\n\t\t\tself.total_mass += b.mass\n\n\t\tself.body = Body(*body_joints, self.joint_pos, freq=freq, name=\"body\")\n\n\t\tself.body.get_kinematics(\n\t\t\tnp.stack([self.joint_pos[:, body_joints[0]], self.joint_pos[:, body_joints[1]]], axis=1))\n\t\tself.body.get_dynamics()\n\n\t\tself.total_mass += self.body.mass\n\n\t\t# Paw parameters\n\t\tm = self.total_mass\n\t\tpaw_d = self.model.paws\n\t\tself.L0_paws = {\"front\": paw_d[\"L0_front\"] * m, \"rear\": 
paw_d[\"L0_rear\"] * m}\n\t\tself.k_paws = {\"front\": paw_d[\"k_front\"] * m, \"rear\": paw_d[\"k_rear\"] * m ** paw_d[\"k_rear_prop\"]}\n\t\tself.c_paws = {\"front\": paw_d[\"c_front\"] * m, \"rear\": paw_d[\"c_rear\"] * m}\n\n\t\t# if self.model.equation_weighting['Paw spring'] > 0:\n\t\tself.set_paw_equilibrium()\n\n\t\tself.get_dynamics()\n\t\tself.leg_spring_joints = leg_spring_joints\n\n\t\tself.calc_leg_lengths()\n\n\t\tself.equation_weighting = model.equation_weighting\n\n\tdef get_dynamics(self):\n\t\t\"\"\"Gets dynamics of centre of mass of each bone & body\"\"\"\n\t\tfor bone in self.target_bones:\n\t\t\tbone.get_kinematics(self.joint_pos[:, [bone.start, bone.end]])\n\t\t\tbone.get_dynamics()\n\n\t\tbody = self.body\n\t\tbody.get_kinematics(\n\t\t\tnp.stack([self.joint_pos[:, body.start_joints], self.joint_pos[:, body.end_joints]], axis=1))\n\t\tbody.get_dynamics()\n\n\tdef calculate_forces(self, n_frame, report_equations=True):\n\t\t\"\"\"\n\t\tSets up a system of linear equations governing the motion of the skeleton at a given frame.\n\n\t\tThese equations are:\n\n\t\t- FREE JOINTS:\n\t\tThe torques at free joints are zero. Free joints are joints only connected to one bone, on the end of the body eg the feet\n\n\t\t- INERTIA:\n\t\tOn each bone, the sum of the two joint forces is equal to the mass * acceleration of the bone\n\n\t\t- ROTATION:\n\t\tOn each bone, the net torque about the bone is equal to the I * alpha_g of the bone\n\n\t\t- BODY:\n\t\tThe body is set up as a slightly different type of bone, in which it has several joints connected at either end, and its position is dictated by all of those joints.\n\t\tSee the code for it below, it has its own set of inertial and rotational equations.\n\n\t\tThis is set up as a least squares problem Ax = b, where A is a matrix of coefficients to multiply the unknowns by,\n\t\tx is the unknowns (in the form [F_1_x, F_1_y, F_1_z, F_2_x, ... T_1, T, ...]\n\t\tb is the result of the equations.\n\n\t\tA weighting is also applied to each row to weight the least squares problem (eg to priorities free joint equations)\n\n\t\tThe problem also has bounds applied to it. For now, these bounds are simply that foot joint vertical reaction forces are non negative.\n\n\t\tImprovements:\n\t\t- Replace the current spinal system with a large non axisymmetric cylinder to represent the body\n\t\t- Add a sphere to represent the head\n\n\t\t\"\"\"\n\n\t\t# Consult report for explanation of system\n\n\t\tA = []\n\t\tb = []\n\t\tweights = [] # Collect weightings for each equation as they are added to the system\n\t\tequation_weighting = self.equation_weighting\n\n\t\t# Reasonable bounds for each force, and for each torque. 
Current limits set at 10 * weight for mass, 10 * mass at one metre for torque\n\t\tmax_force = 3 * self.total_mass * g\n\t\tmax_torque = 3 * self.total_mass * g\n\t\t# bounds can be adjusted further for specific joints (eg no downards reaction at the feet)\n\t\tbounds = [(-max_force, max_force)] * (3 * self.n_joints) + [(-max_torque, max_torque)] * (self.n_joints)\n\n\t\tdef A_row(vals={}):\n\t\t\t\"\"\"Returns a row of 0s length 4 * self.n_joints, with other vectors in any indices in vals.\n\t\t\tvals is a dict of index:vector\"\"\"\n\t\t\trow = [0.0] * 4 * self.n_joints\n\t\t\tfor index, val in vals.items():\n\t\t\t\trow[index] = val\n\t\t\treturn row\n\n\t\tdef add_blank_row():\n\t\t\tA.append(A_row({}))\n\t\t\tb.append(0)\n\t\t\tweights.append(0)\n\n\t\tdef add_n_blank_rows(n=1):\n\t\t\tfor i in range(n): add_blank_row()\n\n\t\tnull, unit, g_vec = Vector(0, 0, 0), Vector(1, 1, 1), Vector(0, 0, -g)\n\n\t\tn_joints = self.n_joints\n\n\t\tdef get_index(joint, dimension=0, is_force=True):\n\t\t\t\"\"\"Get correct index of D\"\"\"\n\t\t\treturn (3 * n_joints * int(not is_force)) + ([1, 3][is_force] * joint) + dimension\n\n\t\t# dimension = 0 for x, 1 for y, 2 for z\n\n\t\t# First, add the equations to show that the torques in each of the foot joints are zero\n\t\tfor no_torque_joint in self.no_torque_joints:\n\t\t\t# Set up the equation 1 * tau_{foot_joint} = 0\n\t\t\t# BOUNDARY CONDITIONS ARE FIXED, RATHER THAN AN ADDITIONAL EQUATION. SO INCORPORATE THEM INTO BOUNDS\n\t\t\tbounds[get_index(no_torque_joint, is_force=False)] = (0, 1e-10)\n\n\t\tfor no_reaction_joint in self.no_reaction_joints: # BC : no reactions\n\t\t\tfor dim in [0, 1, 2]:\n\t\t\t\tbounds[get_index(no_reaction_joint, dimension=dim, is_force=True)] = (0, 1e-10)\n\n\t\tfor foot_joint in self.foot_joints:\n\t\t\t## If the feet are a certain amount off the ground for that foot, also assign the reaction forces to be zero\n\t\t\tbone_name = self.get_foot_joint_from_index[foot_joint]\n\n\t\t\tend = bone_name.split(\" \")[1] # get 'front' or 'rear'\n\t\t\tL0 = self.L0_paws[end] # get stiffness from 'front' or 'rear' in bone name\n\t\t\t# L0 = self.paw_equilibrium_values[foot_joint]\n\n\t\t\tk_paw = self.k_paws[end]\n\t\t\tc_paw = self.c_paws[end]\n\n\t\t\tpaw_disp = self.paw_disps[foot_joint][n_frame]\n\n\t\t\tpaw_off_ground = self.joint_pos[n_frame, foot_joint, 2] >= L0 # BC: no reaction in foot off ground\n\t\t\tpaw_off_ground = paw_disp == 0\n\n\t\t\tif paw_off_ground: # BC: no reaction in foot off ground\n\t\t\t\tfor dim in [0, 1, 2]:\n\t\t\t\t\tbounds[get_index(foot_joint, dimension=dim, is_force=True)] = (0, 1e-10)\n\n\t\t\t\tadd_n_blank_rows(4) # for consistency of number of eqns\n\n\t\t\telse: # If paw near ground, add force due to spring\n\n\t\t\t\theight = self.unsmoothed_data[n_frame, foot_joint, 2]\n\n\t\t\t\teps = L0 - height # min((L0 - height), L0/2)\n\t\t\t\teps_dot = self.joint_vel[n_frame, foot_joint, 2]\n\n\t\t\t\tF_damp = 0 # c_paw * eps_dot\n\n\t\t\t\tif self.model.equation_weighting['Paw spring'] > 0:\n\t\t\t\t\t## PAW SPRING MODEL\n\t\t\t\t\teps = paw_disp\n\t\t\t\t\tF_spring = k_paw * eps + c_paw * eps_dot\n\n\t\t\t\t\tif foot_joint != 20:\n\t\t\t\t\t\tA.append(A_row({get_index(foot_joint, dimension=2, is_force=True): 1}))\n\t\t\t\t\t\tb.append(F_spring + F_damp)\n\t\t\t\t\t\tweights.append(equation_weighting[\"Paw spring\"])\n\n\t\t\t\tif self.model.equation_weighting['Leg spring'] > 0:\n\t\t\t\t\t## LEG SPRING MODEL\n\t\t\t\t\tK = 3000 if end == \"front\" else 2000\n\t\t\t\t\tfor dim in [0, 1, 
2]:\n\t\t\t\t\t\t# component = self.leg_vecs[foot_joint][n_frame][dim]\n\t\t\t\t\t\tF_spring = self.leg_disps[foot_joint][n_frame] * K # * component\n\t\t\t\t\t\tA.append(A_row({get_index(foot_joint, dimension=dim, is_force=True): 1}))\n\t\t\t\t\t\tb.append(F_spring + F_damp)\n\t\t\t\t\t\tweights.append(equation_weighting[\"Leg spring\"])\n\n\t\t\t\t# Set bounds for foot joints to only have positive vertical reactions\n\t\t\t\tbounds[get_index(foot_joint, dimension=2, is_force=True)] = (0, max_force)\n\t\t\t\tbounds[get_index(foot_joint, dimension=1, is_force=True)] = (0, 1e-10) # set Fy=0\n\n\t\tfor bone in self.target_bones:\n\t\t\tj_1, j_2 = bone.start, bone.end\n\t\t\tx_1, x_2 = bone.X[n_frame]\n\n\t\t\t# F_1 + F_2 + F_grav = F_net\n\t\t\tF_net = bone.F_net[n_frame]\n\t\t\tfor dim in [0, 1, 2]:\n\t\t\t\tA.append(A_row({get_index(j_1, dim): 1, get_index(j_2, dim): - 1}))\n\t\t\t\tb.append((F_net - bone.mass * g_vec)[dim])\n\t\t\t\tweights.append(equation_weighting[\"Inertial\"])\n\n\t\t\ttau_net = bone.tau_net[n_frame]\n\t\t\tx_g = bone.XG[n_frame]\n\t\t\tr_1, r_2 = (x_1 - x_g), (x_2 - x_g)\n\n\t\t\t# direction of each T is perpendicular to the bones that the joint is on\n\t\t\tadjacent_1_bone = [b for b in self.target_bones if b.end == j_1 and b != bone]\n\t\t\tif len(adjacent_1_bone) == 1: # if there is an adjacent bone\n\t\t\t\tadj_bone = adjacent_1_bone[0]\n\t\t\t\tT_1_dir = Vector(*r_1).cross((adj_bone.X[n_frame, 1] - adj_bone.XG[n_frame])).unit()\n\t\t\tif len(adjacent_1_bone) == 0 or np.isnan(T_1_dir).any(): # if no adjacent, or if above calc causes error\n\t\t\t\tT_1_dir = (0, 1, 0) # Improve later, for now say all torques about y axis\n\n\t\t\tadjacent_2_bone = [b for b in self.target_bones if b.start == j_2 and b != bone]\n\t\t\tif len(adjacent_2_bone) == 1: # if there is an adjacent bone\n\t\t\t\tadj_bone = adjacent_2_bone[0]\n\t\t\t\tT_2_dir = Vector(*r_2).cross((adj_bone.X[n_frame, 0] - adj_bone.XG[n_frame])).unit()\n\t\t\tif len(adjacent_2_bone) == 0 or np.isnan(T_2_dir).any(): # if no adjacent, or if above calc causes error\n\t\t\t\tT_2_dir = (0, 1, 0) # Improve later, for now say all torques about y axis\n\n\t\t\tfor dim in [0, 1, 2]:\n\t\t\t\t# This loop essentially writes out the following equations into A and b for each dimension (x,y,z):\n\t\t\t\t# r1 x F1 + r2 x F2 + T1 + T2 = T_net\n\n\t\t\t\t# The cross product of r = (x,y,z) and F = (Fx, Fy, Fz) yields (Fz*y - Fy*z, ...)\n\t\t\t\t# Take the x component, x -> Fz*y - Fy*z\n\t\t\t\t# Notice that Fy is negative, and Fz is positive. 
This is always true, that, for the forces, one lower dimension than the current is positive, and one higher is negative (cyclical relations)\n\t\t\t\t# use this below\n\n\t\t\t\t# Get dim above and below, wrapping round for below x and above z\n\t\t\t\tdim_below = (dim - 1) % 3\n\t\t\t\tdim_above = (dim + 1) % 3\n\n\t\t\t\tcoeff_dict = {\n\t\t\t\t\tget_index(j_1, dim): 0,\n\t\t\t\t\t# eg no effect of F_x in the x directional torque (not relevant statement, only here for readability)\n\t\t\t\t\tget_index(j_1, dim_above): - r_1[dim_below], # eg multiply - z by Fy in the x direction\n\t\t\t\t\tget_index(j_1, dim_below): r_1[dim_above], # eg multiply y by Fz in the x direction\n\n\t\t\t\t\t# Reversed polarity for joint 2 as the desired force is - F2\n\t\t\t\t\tget_index(j_2, dim_above): r_2[dim_below],\n\t\t\t\t\tget_index(j_2, dim_below): - r_2[dim_above],\n\n\t\t\t\t\t# Add the torques on each joint\n\t\t\t\t\tget_index(j_1, is_force=False): T_1_dir[dim],\n\t\t\t\t\tget_index(j_2, is_force=False): -T_2_dir[dim]\n\n\t\t\t\t}\n\n\t\t\t\tA.append(A_row(coeff_dict))\n\t\t\t\tb.append(tau_net[dim])\n\t\t\t\tweights.append(equation_weighting[\"Rotational\"])\n\n\t\t### SOLVE FORCES ON BODY. Note body defined so all joint forces/torques on it are positive\n\t\tbody = self.body\n\t\tF_net = body.F_net[n_frame]\n\n\t\t# BODY INERTIAL FORCES\n\t\tfor dim in [0, 1, 2]:\n\t\t\tA.append(A_row({get_index(j, dim): 1 for j in self.body.start_joints + self.body.end_joints}))\n\t\t\tb.append((F_net - body.mass * g_vec)[dim])\n\t\t\tweights.append(equation_weighting[\"Inertial\"])\n\n\t\t# BODY ROTATIONAL FORCES - same as for bones\n\t\tx_g = body.XG[n_frame]\n\t\ttau_net = body.tau_net[n_frame]\n\n\t\t# Improve above later, for now say all torques about y axis\n\t\tT_dir = (0, 1, 0)\n\n\t\tfor dim in [0, 1, 2]:\n\t\t\tcoeff_dict = {}\n\t\t\tfor joint in body.start_joints + body.end_joints:\n\t\t\t\tx_j = self.joint_pos[n_frame, joint]\n\t\t\t\tr_j = (x_j - x_g) # position vector to centre\n\n\t\t\t\t# Get dim above and below, wrapping round for below x and above z\n\t\t\t\tdim_below, dim_above = (dim - 1) % 3, (dim + 1) % 3\n\n\t\t\t\tcoeff_dict[get_index(joint, dim_above)] = -r_j[dim_below] # eg multiply - z by Fy in the x direction\n\t\t\t\tcoeff_dict[get_index(joint, dim_below)] = r_j[dim_above] # eg multiply y by Fz in the x direction\n\n\t\t\t\tcoeff_dict[get_index(joint, is_force=False)] = T_dir[dim] # Add pure torque of pin\n\n\t\t\tA.append(A_row(coeff_dict))\n\t\t\tb.append(tau_net[dim])\n\t\t\tweights.append(equation_weighting[\"Rotational\"])\n\n\t\t# print each line of the equations defined by A, b, with the final result\n\t\t# Only print variables with both non-zero values, and non-zero coefficients\n\t\tif report_equations:\n\t\t\tprint(f\"----Frame {n_frame}----\")\n\t\t\tparams = []\n\n\t\t\tfor joint in range(self.n_joints):\n\t\t\t\tfor dim in \"xyz\":\n\t\t\t\t\tparams.append(F\"F_{joint}_{dim}\") # Add forces by joint\n\n\t\t\tfor joint in range(self.n_joints):\n\t\t\t\tparams.append(F\"T_{joint}\") # Add torques by joint\n\n\t\t\tfor n, (coeffs, result) in enumerate(zip(A, b)):\n\t\t\t\ts = []\n\t\t\t\tfor j, (coeff, param) in enumerate(zip(coeffs, params)):\n\t\t\t\t\tif coeff != 0:\n\t\t\t\t\t\ts.append(f\"{round(coeff, 3)} * {param}\")\n\n\t\t\t\t# b_actual = np.dot(A[n], D)\n\t\t\t\t# pct_error = abs(100 * (b_actual - result) / b_actual)\n\t\t\t\tif n <= 7:\n\t\t\t\t\tprint(f\"{' + '.join(s)} = {round(result, 3)}\") # ({round(b_actual, 3)}) [{round(pct_error, 2)}%]\")\n\n\t\treturn A, 
b, weights, bounds\n\n\tdef solve_forces(self, report_equations=False, end_frames_disregarded=5, prefix=\"\",\n\t\t\t\t\t save=True):\n\t\t\"\"\"Solves the forces at each frame for the system, collects them and saves them to .npy files.\n\n\t\tNote: Currently, due to smoothing, the first 5 and last 5 frames are disregarded\"\"\"\n\n\t\tself.get_dynamics()\n\t\tn_joints = self.n_joints\n\n\t\tif report_equations:\n\t\t\tprint(\"Solving system...\")\n\t\t\tprint(f\"Total mass {round(self.total_mass, 2)} kg.\")\n\n\t\t# If dir doesn't exist, make it\n\t\tdir = path_join(DataSources.dynamics_data, self.name)\n\t\tif self.name not in os.listdir(DataSources.dynamics_data):\n\t\t\tos.mkdir(dir)\n\n\t\tforces, torques = [], []\n\n\t\tf_shape, t_shape = (self.n_joints, 3), (self.n_joints,)\n\t\t# Add zeros either end due to not being able to calculate for the first or last 2 frames\n\t\tfor i in range(end_frames_disregarded):\n\t\t\tforces.append(np.zeros(f_shape))\n\t\t\ttorques.append(np.zeros(t_shape))\n\n\t\tcalc_forces = []\n\t\tcalc_torques = []\n\n\t\tprogress = tqdm(total=self.n_frames - 2 * end_frames_disregarded)\n\t\tfor n_frame in range(end_frames_disregarded, self.n_frames - end_frames_disregarded):\n\t\t\tA, b, weights, bounds = self.calculate_forces(n_frame, report_equations=report_equations)\n\n\t\t\tD = weighted_bound_least_squares(A, b, weights, bounds, rcond=None)\n\n\t\t\tf, tau = D[:(3 * n_joints)], D[(3 * n_joints):]\n\n\t\t\tf, tau = f.reshape((n_joints, 3)), tau.reshape((n_joints))\n\n\t\t\tcalc_forces.append(f)\n\t\t\tcalc_torques.append(tau)\n\n\t\t\tprogress.update()\n\n\t\tforces[end_frames_disregarded: - end_frames_disregarded] = calc_forces\n\t\ttorques += calc_torques\n\n\t\tfor i in range(end_frames_disregarded):\n\t\t\tforces.append(np.zeros(f_shape))\n\t\t\ttorques.append(np.zeros(t_shape))\n\n\t\tif save:\n\t\t\tnp.save(path_join(dir, prefix + \"forces.npy\"), forces)\n\t\t\tnp.save(path_join(dir, prefix + \"torques.npy\"), torques)\n\n\t\treturn np.array(forces), np.array(torques)\n\n\tdef get_com_position(self):\n\t\t\"\"\"Calculates the position of the centre of mass of the whole system at each timestep\"\"\"\n\t\treturn sum(b.XG * b.mass for b in self.target_bones + [self.body]) / self.total_mass\n\n\tdef return_equations(self, end_frames_disregarded=5):\n\t\t\"\"\"For each frame, return the equation vector b\"\"\"\n\t\tself.get_dynamics()\n\t\tbs = []\n\n\t\tfor n_frame in range(end_frames_disregarded, self.n_frames - end_frames_disregarded):\n\t\t\tA, b, weights, bounds = self.calculate_forces(n_frame, report_equations=False)\n\t\t\tbs.append(b)\n\n\t\treturn np.array(bs)\n\n\tdef set_paw_equilibrium(self):\n\t\t\"\"\"Get paw equilibrium from mocap data by finding the drop of the paw.\n\t\tThis method will work for the current dataset, but is likely not robust, so can be replaced with\n\t\ta better method of finding the paw equilibrium at a later date\"\"\"\n\n\t\tif self.is_mocap:\n\t\t\tpaw_z_heights = self.unsmoothed_data[:, self.foot_joints, 2]\n\n\t\telse:\n\t\t\tpaw_z_heights = self.unsmoothed_data[:, self.foot_joints, 2]\n\n\n\t\tself.paw_disps = {} # paw joint: displacement over time, for paw spring model\n\n\t\tmin_contacts_detected = 3 # minimum requirement to use peak detection mode\n\n\t\tplot = True\n\t\tif plot:\n\t\t\tfig, axes = plt.subplots(nrows=2, ncols=2)\n\n\t\tfootfall_detector = FootfallDetector(train=False, load=True, name=[\"smal\", \"mocap\"][self.is_mocap])\n\t\tfor n, paw in enumerate(self.foot_joints):\n\t\t\tcontact_ends_failed 
= False\n\t\t\tdisp = np.zeros((self.n_frames)) # will give eps - the displacement of the paw from equilibrium\n\t\t\t# for when the paw is in contact with the ground\n\n\t\t\tZ = paw_z_heights[:, n]\n\n\t\t\ton_ground = footfall_detector.process_clip(Z)\n\t\t\ton_ground_idxs = np.where(on_ground > 0)[0]\n\n\t\t\tif plot:\n\t\t\t\taxes[n // 2, n % 2].plot(Z.mean() * (on_ground), color=\"red\", alpha=0.3)\n\n\t\t\tmin_footfall_width = 3 # 3 frames long minimum to count as a footfall\n\t\t\tfootfalls = consecutive(on_ground_idxs)\n\t\t\ttrigger_height = np.percentile(np.array([Z[ff].max() for ff in footfalls]), 25) # mean trigger height\n\t\t\tfor footfall in footfalls:\n\t\t\t\tif len(footfall) > min_footfall_width:\n\t\t\t\t\t# disp[footfall] = Z[footfall].max() - Z[footfall] # old\n\t\t\t\t\tdisp[footfall] = np.clip(trigger_height - Z[footfall], a_min=0, a_max=None)\n\n\t\t\tself.paw_disps[paw] = disp\n\n\t\t\tif plot:\n\t\t\t\tax = axes[n // 2, n % 2]\n\t\t\t\tax.plot(Z)\n\n\t\t\t\tZ_on_ground = Z.copy()\n\t\t\t\tZ_on_ground[disp == 0] = np.nan\n\t\t\t\tax.plot(Z_on_ground, color=\"green\")\n\t\t\t\tax.plot(disp)\n\t\t\t\tZ_smoothed = self.joint_pos[:, paw, 2]\n\n\t\t\t\tax.set_title(n)\n\n\t\tif plot:\n\t\t\tplt.show(block=False)\n\t\t\tplt.draw()\n\t\t\tplt.pause(1e-8)\n\n\tdef view_ground_displacements(self, deriv=0):\n\t\t\"\"\"Plot and show a graph of vertical displacement against frames for each paw - identifying L0 for each paw\"\"\"\n\n\t\tfig, axes = plt.subplots(nrows=4)\n\t\tfor n, j in enumerate(self.foot_joints):\n\t\t\tlabel = foot_joint_labels[n]\n\t\t\tax = axes[n]\n\t\t\tif deriv == 0:\n\t\t\t\tX = self.joint_pos[:, j, 2]\n\t\t\t\tX_unsmoothed = self.unsmoothed_data[:, j, 2]\n\t\t\t\tax.plot(X)\n\t\t\t\tax.plot(X_unsmoothed, alpha=.6)\n\t\t\t\t# ax.axhline(self.paw_equilibrium_values[j], ls = \"--\")\n\t\t\t\tax.axhline(self.L0_paws[label.split(\" \")[0]])\n\t\t\telif deriv == 1:\n\t\t\t\tax.plot(self.joint_vel[:, j, 2])\n\n\t\t\tax.set_title(label)\n\n\t\tplt.show()\n\n\tdef view_com_displacements(self, deriv=0):\n\t\t\"\"\"Plot and show graph of X, Y, and Z motion of CoM of dog.\n\t\tIf deriv > 0, plot that derivative of the displacement\"\"\"\n\n\t\tfig, ax = plt.subplots()\n\t\tcom_data = self.get_com_position()\n\t\tif deriv > 0:\n\t\t\tcom_data = nth_time_deriv(com_data, 1 / self.freq, n=deriv)\n\n\t\tfor i in [0, 1, 2]:\n\t\t\tax.plot(com_data[:, i], label=\"xyz\"[i])\n\n\t\tax.legend()\n\t\tplt.show()\n\n\tdef calc_leg_lengths(self):\n\t\t\"\"\"Uses the compliant-legged walking model estimation to work out the average length of legs.\n\t\tAssume legs are undeformed while off ground. 
Work out avg distance from leg to COM\"\"\"\n\n\t\tself.leg_disps = {} # length of leg over time for each paw\n\t\tself.leg_vecs = {} # normalised vector of leg spring direction for each paw\n\n\t\tplot = True\n\t\tif plot: fig, axes = plt.subplots(nrows=2, ncols=2, sharex=\"all\", sharey=\"row\")\n\n\t\tfor n, paw in enumerate(self.foot_joints):\n\t\t\tis_front = n < 2 # Assumes order of f left, f right, r left, r right\n\n\t\t\ttol = 1e-3\n\t\t\ton_ground = self.paw_disps[paw] > tol\n\t\t\toff_ground = self.paw_disps[paw] <= tol\n\n\t\t\t# centre_of_rot = self.body.XG[:]#self.body.X[:, int(is_front)]\n\t\t\t# centre_of_rot = self.unsmoothed_data[:, self.body_joints[is_front][n%2]]\n\t\t\tif self.is_mocap:\n\t\t\t\tcentre_of_rot = self.unsmoothed_data[:, self.leg_spring_joints[n]]\n\t\t\t\tpaw_pos = self.unsmoothed_data[:, paw]\n\n\t\t\telse:\n\t\t\t\tcentre_of_rot = self.unsmoothed_data[:, self.leg_spring_joints[n]]\n\t\t\t\tpaw_pos = self.unsmoothed_data[:, paw]\n\n\t\t\tX, Z = np.swapaxes(centre_of_rot[:, [0, 2]], 0, 1) # get X, Z position of CoM\n\t\t\tX_PAW, Z_PAW = np.swapaxes(paw_pos[:, [0, 2]], 0, 1) # get X, Z position of CoM\n\n\t\t\tTHETA = np.arctan((X_PAW - X) / (Z - Z_PAW)) # angle between spring and vertical\n\n\t\t\tL = ((X - X_PAW) ** 2 + (Z - Z_PAW) ** 2) ** .5\n\t\t\tL0 = (L).max()\n\n\t\t\tz_disp = (L - L0) * np.cos(THETA)\n\t\t\tx_disp = (L - L0) * np.sin(THETA)\n\n\t\t\t# get z displacement by footfall\n\t\t\tdisp = np.zeros(self.n_frames)\n\n\t\t\t# if self.is_mocap:\n\t\t\tfor ff in consecutive(np.where(on_ground)[0]):\n\t\t\t\tif len(ff) < 3: continue # min width of footfall required\n\t\t\t\tdisp[ff] = z_disp[ff].max() - z_disp[ff]\n\n\t\t\t# else:\n\t\t\t# disp = -z_disp\n\n\t\t\tself.leg_disps[paw] = disp\n\n\t\t\tif plot:\n\t\t\t\tax = axes[n // 2, n % 2]\n\n\t\t\t\t# ax.plot(L)\n\t\t\t\tax.plot(L - L0)\n\t\t\t\tax.plot(disp, color=\"green\")\n\n\t\tif plot:\n\t\t\tplt.tight_layout()\n\t\t\t# plt.show()\n\t\t\tplt.show(block=False)\n\t\t\tplt.draw()\n\t\t\tplt.pause(1e-8)\n\n\ndef norm_kin_data(kin_data, targ_markers=None):\n\t\"\"\"Normalise kinematic data.\n\tIf targ_markers given, normalise so these markers are at desired height\"\"\"\n\n\tnorm_height = 0.4 # 0.635 # fixed to Ally height for now\n\n\t# scale so minimum is at (0,0,0)\n\tfor dim in [0, 1, 2]:\n\t\tkin_data[:, :, dim] -= kin_data[:, :, dim].min()\n\n\tif targ_markers is None:\n\t\tkin_data = norm_height * kin_data / np.max(kin_data[:, :, 2])\n\n\telif targ_markers is not None:\n\t\theight_target = kin_data[:, targ_markers, 2].mean()\n\t\tkin_data = norm_height * kin_data / height_target\n\n\treturn kin_data\n\n\ndef get_dyn_data(dynamic_src, clip_length, mass, is_mocap=True, target_freq=100):\n\t\"\"\"Loads and returns kinematic data\"\"\"\n\n\tforce_plate_data, force_plate_tdelay = load_force_plate_data(dynamic_src, is_mocap)\n\traw_dyn_data = force_plate_data\n\traw_dyn_data *= 1 / (mass * 9.81)\n\n\t# resample if requested\n\tif target_freq != freq_forceplate:\n\t\ttarget_frames = int(len(raw_dyn_data) * target_freq / freq_forceplate)\n\t\tdyn_data = signal.resample(raw_dyn_data, target_frames)\n\n\t\t# this resampling causes a jumpiness for the periods of zero value. 
Fix that here:\n\t\ttol = 1e-4\n\t\tfor paw in range(dyn_data.shape[1]):\n\t\t\t# get indices where should be 0\n\t\t\tantifootfalls = consecutive(np.where(raw_dyn_data[:, paw] < tol)[0])\n\t\t\tmin_width = 10 # in frames\n\n\t\t\tfor aff in antifootfalls:\n\t\t\t\tif len(aff) < min_width: continue\n\t\t\t\tstart, end = aff[0] * target_freq / freq_forceplate, aff[-1] * target_freq / freq_forceplate\n\t\t\t\t# ^ start and end indices, in remapped frame\n\t\t\t\tdyn_data[int(start):int(end), paw] = 0 # set to 0\n\n\t\tfreq = target_freq\n\n\telse:\n\t\tfreq = freq_forceplate\n\t\tdyn_data = raw_dyn_data\n\n\tframe_delay = int(freq * force_plate_tdelay)\n\tn_frames_forceplate = int(clip_length * freq) # number of frames for forceplate to be same time length as mocap\n\n\tif frame_delay == 0:\n\t\treturn dyn_data[:n_frames_forceplate]\n\n\tif frame_delay > 0: # crop forceplate data\n\t\treturn dyn_data[frame_delay: frame_delay + n_frames_forceplate] # crop forceplate data to match mocap/SMAL data\n\n\telse: # fdelay <0, pad forceplate data\n\t\treturn np.pad(dyn_data, ((int(-frame_delay), 0), (0, 0)))[:n_frames_forceplate]\n\n\nkin_src_to_solver_name = lambda s: s.replace(\"/\", \" \").replace(\" \", \"_\").replace(\".c3d\", \"\")\n\n\ndef load_solver(kin_src, clip_length, mocap=True, resample_freq=100):\n\tif mocap:\n\t\tjoint_data = C3DData(ax=None, src=kin_src, interpolate=True, crop=clip_length,\n\t\t\t\t\t\t\t fix_rotations=\"3 kph\" in kin_src) # only fix rotations for 3 kph for now\n\n\telse:\n\t\tjoint_data = SMALData(kin_src, freq=30, norm=True, crop=clip_length, smooth=True)\n\n\tjoint_data.resample_at(resample_freq) ### TRY RESAMPLING DATA TO 100 Hz\n\ttarget_bones, body_joints, no_torque_joints, leg_spring_joints = joint_data.generate_skeleton_mapping()\n\n\t# Normalise data based on z data, so that the dog is roughly 0.5m high. Also smooth data\n\tkin_data = np.array(joint_data.all_data)\n\tkin_data = norm_kin_data(kin_data, targ_markers=leg_spring_joints)\n\n\tsolver_kwargs = dict(target_bones=target_bones,\n\t\t\t\t\t\t body_joints=body_joints, no_torque_joints=no_torque_joints,\n\t\t\t\t\t\t foot_joints=no_torque_joints, leg_spring_joints=leg_spring_joints,\n\t\t\t\t\t\t freq=joint_data.freq,\n\t\t\t\t\t\t name=kin_src_to_solver_name(kin_src))\n\n\tsolver = InverseDynamicsSolver(joint_data=kin_data, **solver_kwargs, is_mocap=mocap)\n\tprint(f\"Solver loaded. Mass = {solver.total_mass:.1f} kg.\")\n\treturn solver\n"
] | [
[
"scipy.signal.resample",
"scipy.signal.savgol_filter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
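The time_deriv helper in the dynamics.py row above uses one-sided differences at the ends and central differences (over 2 * dt) in the interior. A self-contained sketch that re-derives it and checks it against an analytic derivative (the test signal and tolerance are my choices, not from the repo):

import numpy as np

def time_deriv(X, dt):
    # One-sided differences at the ends, central differences elsewhere,
    # matching the scheme in dynamics.time_deriv.
    diff = np.zeros_like(X)
    diff[0] = X[1] - X[0]
    diff[1:-1] = (X[2:] - X[:-2]) / 2
    diff[-1] = X[-1] - X[-2]
    return diff / dt

t = np.linspace(0, 2 * np.pi, 1000)
dt = t[1] - t[0]
# d/dt sin(t) = cos(t); interior points are second-order accurate.
err = np.max(np.abs(time_deriv(np.sin(t), dt) - np.cos(t)))
assert err < 1e-3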
gosticks/body-pose-animation | [
"eb1b5876a845f277d43bfc18dcd48c4a9c694c06",
"eb1b5876a845f277d43bfc18dcd48c4a9c694c06"
] | [
"utils/video.py",
"utils/render.py"
] | [
"from dataset import SMPLyDataset\nimport pickle\nfrom typing import Tuple\nfrom model import SMPLyModel\nfrom renderer import DefaultRenderer\nimport cv2\nfrom tqdm import tqdm\nimport numpy as np\nfrom scipy import interpolate\n\n\ndef make_video(images, video_name: str, fps=30, ext: str = \"mp4\", post_process_frame=None):\n images = np.array(images)\n width = images.shape[2]\n height = images.shape[1]\n\n fourcc = 0\n if ext == \"mp4\":\n fourcc = cv2.VideoWriter_fourcc(*'MP4V')\n\n video_name = video_name + \".\" + ext\n\n video = cv2.VideoWriter(\n video_name, fourcc, fps, (width, height), True)\n\n for idx in tqdm(range(len(images))):\n img = images[idx]\n im_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n if post_process_frame is not None:\n img_rgb = post_process_frame(img=im_rgb, idx=idx)\n\n video.write(im_rgb)\n\n video.release()\n print(\"video saved to:\", video_name)\n\n\ndef video_from_pkl(filename, video_name, config, ext: str = \"mp4\"):\n with open(filename, \"rb\") as fp:\n model_outs = pickle.load(fp)\n save_to_video(model_outs, video_name, config)\n\n\ndef save_to_video(\n sample_output: Tuple,\n video_name: str,\n config: object,\n fps=30,\n include_thumbnail=True,\n thumbnail_size=0.2,\n start_frame_offset=0,\n dataset: SMPLyDataset = None,\n interpolation_target=None\n):\n \"\"\"\n Renders a video from pose, camera tuples. Additionally interpolation can be used to smooth out the animation\n\n Args:\n sample_output (Tuple): A tuple of body pose vertices and a camera transformation\n video_name (str): name for the resulting video file (can also be a path)\n config (object): general run config\n fps (int, optional): animation base fps. Defaults to 30.\n interpolation_target (int, optional): expand animation fps via interpolation to this target. 
Defaults to 60.\n \"\"\"\n r = DefaultRenderer(\n offscreen=True\n )\n r.start()\n\n model_anim = SMPLyModel.model_from_conf(config)\n\n if interpolation_target is not None:\n if interpolation_target % fps != 0:\n print(\"[error] interpolation target must be a multiple of fps\")\n return\n inter_ratio = int(interpolation_target / fps)\n num_intermediate = inter_ratio - 1\n sample_output = interpolate_poses(sample_output, num_intermediate)\n else:\n sample_output = [\n (\n out.vertices.detach().cpu().numpy()[0],\n cam\n ) for out, cam in sample_output]\n frames = []\n print(\"[export] rendering animation frames...\", sample_output[0][0].shape)\n\n # just use the first transform\n cam_transform = sample_output[0][1]\n\n for vertices, cam_trans in tqdm(sample_output):\n r.render_model_geometry(\n faces=model_anim.faces,\n vertices=vertices,\n pose=cam_trans # cam_transform,\n )\n frames.append(r.get_snapshot())\n\n target_fps = fps\n if interpolation_target is not None:\n target_fps = interpolation_target\n\n def post_process_frame(img, idx: int):\n if not include_thumbnail:\n return img\n # account for start from frames not zero\n idx = start_frame_offset + idx\n frame_idx = idx\n if interpolation_target is not None:\n # account for possible interpolation\n frame_idx = int(idx / inter_ratio)\n img_path = dataset.get_image_path(frame_idx)\n overlay = cv2.imread(img_path)\n\n if overlay is None:\n print(\"[error] image could not be \", img_path)\n return img\n\n overlay = cv2.resize(\n overlay,\n dsize=(\n int(overlay.shape[1] * thumbnail_size),\n int(overlay.shape[0] * thumbnail_size)\n ))\n img[0:overlay.shape[0], 0:overlay.shape[1]] = overlay\n return img\n\n make_video(frames, video_name, target_fps,\n post_process_frame=post_process_frame)\n\n\ndef make_video_with_pip(frames, pip_image_path, video_name: str, fps=30, ext: str = \"mp4\", image_size=0.2):\n \"\"\"renders a video with a pip frame in the corner\n \"\"\"\n\n def post_process_frame(img, idx: int):\n overlay = cv2.imread(pip_image_path)\n\n if overlay is None:\n print(\"[error] image could not be \", pip_image_path)\n return img\n\n overlay = cv2.resize(\n overlay,\n dsize=(\n int(overlay.shape[1] * image_size),\n int(overlay.shape[0] * image_size)\n ))\n img[0:overlay.shape[0], 0:overlay.shape[1]] = overlay\n return img\n\n make_video(frames, video_name, fps,\n post_process_frame=post_process_frame)\n\n\ndef interpolate_poses(poses, num_intermediate=5):\n \"\"\"\n Interpolate vertices and cameras between pairs of frames by adding intermediate results\n\n :param poses: optimized poses\n :param num_intermediate: amount of intermediate results to insert between each pair of frames\n :return: interpolated poses, list of tuples (body_pose, camera_pose)\n \"\"\"\n new_poses = []\n for i in range(len(poses) - 1):\n if len(poses) < 2:\n return poses\n else:\n # Shape of one matrix of vertices = torch.Size([1, 10475, 3])\n pose_1 = poses[i][0].vertices.detach().cpu().numpy()\n pose_2 = poses[i + 1][0].vertices.detach().cpu().numpy()\n poses_pair = np.concatenate((pose_1, pose_2), axis=0)\n\n camera_1 = np.expand_dims(poses[i][1], axis=0)\n camera_2 = np.expand_dims(poses[i + 1][1], axis=0)\n camera_pair = np.concatenate((camera_1, camera_2), axis=0)\n\n x = np.arange(poses_pair.shape[0])\n f1 = interpolate.interp1d(x, poses_pair, axis=0)\n f2 = interpolate.interp1d(x, camera_pair, axis=0)\n\n evenly_spaced_points = np.linspace(\n x[0], x[-1], (poses_pair.shape[0] - 1) * (num_intermediate + 1) + 1)\n\n new_frames = 
f1(evenly_spaced_points)\n new_cameras = f2(evenly_spaced_points)\n\n arr = [(new_frames[i], new_cameras[i])\n for i in range(new_frames.shape[0])]\n if 0 < i < len(poses) - 1:\n # remove first frame that was already added in the last interpolation\n arr.pop(0)\n new_poses += arr\n\n return new_poses\n",
"\nfrom typing import List, Set, Dict, Tuple, Optional\nimport numpy as np\nimport trimesh\nimport pyrender\n\n\ndef render_model(\n scene,\n model,\n model_out,\n **kwargs\n):\n return render_model_geometry(\n scene=scene,\n faces=model.faces,\n vertices=model_out.vertices.detach().cpu().numpy().squeeze(),\n **kwargs\n )\n\n\ndef render_model_geometry(\n scene,\n faces,\n vertices,\n color=[1.0, 0.3, 0.3, 0.8],\n name=None,\n pose=None\n):\n # set vertex colors, maybe use this to highlight accuracies\n vertex_colors = np.ones([vertices.shape[0], 4]) * color\n\n # triangulate vertex mesh\n tri_mesh = trimesh.Trimesh(vertices, faces,\n vertex_colors=vertex_colors)\n\n mesh = pyrender.Mesh.from_trimesh(tri_mesh)\n\n return scene.add(mesh, name=name, pose=pose)\n\n\ndef render_points(scene, points, radius=0.005, color=[0.0, 0.0, 1.0, 1.0], name=None, transform=None):\n sm = trimesh.creation.uv_sphere(radius=radius)\n sm.visual.vertex_colors = color\n tfs = np.tile(np.eye(4), (len(points), 1, 1))\n tfs[:, :3, 3] = points\n if transform is not None:\n tfs = transform @ tfs\n pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs)\n # return the render scene node\n return scene.add(pcl, name=name)\n\n\ndef render_camera(scene, radius=0.5, height=0.5, color=[0.0, 0.0, 1.0, 1.0], name=None):\n sm = trimesh.creation.cone(radius, height, sections=None, transform=None)\n sm.visual.vertex_colors = color\n tfs = np.eye(4)\n pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs)\n # return the render scene node\n return scene.add(pcl, name=name)\n\n\ndef render_image_plane(scene, image, scale, name=None):\n height, width, _ = image.shape\n mat = trimesh.visual.texture.TextureVisuals(\n image=image, uv=[[0, 0], [0, 1], [1, 0], [1, 1]])\n tm = trimesh.load('plane.obj', visual=mat)\n tm.visual = mat\n tfs = np.eye(4)\n tfs[0, 0] = width / height * scale\n tfs[1, 1] *= scale\n tfs[2, 2] *= scale\n tfs[0, 3] = (width / height - 1) * scale\n material2 = pyrender.Material(name=name, emissiveTexture=image)\n m = pyrender.Mesh.from_trimesh(tm, poses=tfs)\n return scene.add(m, name=name)\n"
] | [
[
"numpy.expand_dims",
"numpy.linspace",
"numpy.arange",
"numpy.concatenate",
"scipy.interpolate.interp1d",
"numpy.array"
],
[
"numpy.eye",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
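interpolate_poses in the video.py row above is, at its core, a linear interp1d along the frame axis with evenly spaced query points. A compact sketch of that core on toy vertex arrays (the shapes here are assumptions for brevity; the real ones are (1, 10475, 3)):

import numpy as np
from scipy import interpolate

pose_1 = np.zeros((1, 5, 3))   # frame i
pose_2 = np.ones((1, 5, 3))    # frame i + 1
pair = np.concatenate((pose_1, pose_2), axis=0)

num_intermediate = 3
x = np.arange(pair.shape[0])   # [0, 1]
f = interpolate.interp1d(x, pair, axis=0)  # linear by default
points = np.linspace(x[0], x[-1],
                     (pair.shape[0] - 1) * (num_intermediate + 1) + 1)

frames = f(points)             # the original pair plus 3 frames in between
assert frames.shape[0] == num_intermediate + 2
assert np.allclose(frames[2], 0.5)  # the middle frame is the average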
caditi97/exatrkx-ctd2020 | [
"ed090ddfcc9e2e623fb45000fca71d5ad6ccf3b9"
] | [
"GraphLearning/src/distributed/torch.py"
] | [
"\"\"\"Utility code for running native pytorch distributed\"\"\"\n\nimport os\n\nimport torch.distributed as dist\n\ndef init_workers_file():\n rank = int(os.environ['SLURM_PROCID'])\n n_ranks = int(os.environ['SLURM_NTASKS'])\n sync_file = 'file:///tmp/%s_%s_pytorch_sync' % (\n os.environ['USER'], os.environ['SLURM_JOB_ID'])\n dist.init_process_group(backend='nccl', world_size=n_ranks, rank=rank,\n init_method=sync_file)\n return rank, n_ranks\n\ndef init_workers_mpi():\n dist.init_process_group(backend='mpi')\n rank = dist.get_rank()\n n_ranks = dist.get_world_size()\n return rank, n_ranks\n"
] | [
[
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"torch.distributed.init_process_group"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
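The init_workers_file helper above relies on SLURM environment variables and an NCCL backend; a hedged single-machine variant of the same file-based rendezvous that runs on a plain CPU box (the gloo backend and the world-size-1 defaults are my substitutions, not from the repo):

import os
import tempfile
import torch.distributed as dist

rank = int(os.environ.get('SLURM_PROCID', 0))     # default: single rank
n_ranks = int(os.environ.get('SLURM_NTASKS', 1))
# NB: the sync file must not be left over from a previous run.
sync_file = 'file://' + os.path.join(tempfile.gettempdir(), 'pytorch_sync')

dist.init_process_group(backend='gloo', init_method=sync_file,
                        world_size=n_ranks, rank=rank)
print(dist.get_rank(), '/', dist.get_world_size())
dist.destroy_process_group()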
ishandutta2007/incubator-mxnet | [
"54a3c58c49fdfac595a348301b6f0701db09d4ab"
] | [
"tests/python/unittest/test_io.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: skip-file\nimport mxnet as mx\nfrom mxnet.test_utils import *\nimport numpy as np\nimport os, gzip\nimport pickle as pickle\nimport time\ntry:\n import h5py\nexcept ImportError:\n h5py = None\nimport sys\nfrom common import get_data\nimport unittest\n\n\ndef test_MNISTIter():\n # prepare data\n get_data.GetMNIST_ubyte()\n\n batch_size = 100\n train_dataiter = mx.io.MNISTIter(\n image=\"data/train-images-idx3-ubyte\",\n label=\"data/train-labels-idx1-ubyte\",\n data_shape=(784,),\n batch_size=batch_size, shuffle=1, flat=1, silent=0, seed=10)\n # test_loop\n nbatch = 60000 / batch_size\n batch_count = 0\n for batch in train_dataiter:\n batch_count += 1\n assert(nbatch == batch_count)\n # test_reset\n train_dataiter.reset()\n train_dataiter.iter_next()\n label_0 = train_dataiter.getlabel().asnumpy().flatten()\n train_dataiter.iter_next()\n train_dataiter.iter_next()\n train_dataiter.iter_next()\n train_dataiter.iter_next()\n train_dataiter.reset()\n train_dataiter.iter_next()\n label_1 = train_dataiter.getlabel().asnumpy().flatten()\n assert(sum(label_0 - label_1) == 0)\n\ndef test_Cifar10Rec():\n get_data.GetCifar10()\n dataiter = mx.io.ImageRecordIter(\n path_imgrec=\"data/cifar/train.rec\",\n mean_img=\"data/cifar/cifar10_mean.bin\",\n rand_crop=False,\n and_mirror=False,\n shuffle=False,\n data_shape=(3,28,28),\n batch_size=100,\n preprocess_threads=4,\n prefetch_buffer=1)\n labelcount = [0 for i in range(10)]\n batchcount = 0\n for batch in dataiter:\n npdata = batch.data[0].asnumpy().flatten().sum()\n sys.stdout.flush()\n batchcount += 1\n nplabel = batch.label[0].asnumpy()\n for i in range(nplabel.shape[0]):\n labelcount[int(nplabel[i])] += 1\n for i in range(10):\n assert(labelcount[i] == 5000)\n\ndef test_NDArrayIter():\n data = np.ones([1000, 2, 2])\n label = np.ones([1000, 1])\n for i in range(1000):\n data[i] = i / 100\n label[i] = i / 100\n dataiter = mx.io.NDArrayIter(data, label, 128, True, last_batch_handle='pad')\n batchidx = 0\n for batch in dataiter:\n batchidx += 1\n assert(batchidx == 8)\n dataiter = mx.io.NDArrayIter(data, label, 128, False, last_batch_handle='pad')\n batchidx = 0\n labelcount = [0 for i in range(10)]\n for batch in dataiter:\n label = batch.label[0].asnumpy().flatten()\n assert((batch.data[0].asnumpy()[:,0,0] == label).all())\n for i in range(label.shape[0]):\n labelcount[int(label[i])] += 1\n\n for i in range(10):\n if i == 0:\n assert(labelcount[i] == 124)\n else:\n assert(labelcount[i] == 100)\n\ndef test_NDArrayIter_h5py():\n if not h5py:\n return\n\n data = np.ones([1000, 2, 2])\n label = np.ones([1000, 1])\n for i in range(1000):\n data[i] = i / 100\n label[i] = i / 100\n\n try:\n os.remove(\"ndarraytest.h5\")\n except OSError:\n pass\n with 
h5py.File(\"ndarraytest.h5\") as f:\n f.create_dataset(\"data\", data=data)\n f.create_dataset(\"label\", data=label)\n\n dataiter = mx.io.NDArrayIter(f[\"data\"], f[\"label\"], 128, True, last_batch_handle='pad')\n batchidx = 0\n for batch in dataiter:\n batchidx += 1\n assert(batchidx == 8)\n\n dataiter = mx.io.NDArrayIter(f[\"data\"], f[\"label\"], 128, False, last_batch_handle='pad')\n labelcount = [0 for i in range(10)]\n for batch in dataiter:\n label = batch.label[0].asnumpy().flatten()\n assert((batch.data[0].asnumpy()[:,0,0] == label).all())\n for i in range(label.shape[0]):\n labelcount[int(label[i])] += 1\n\n try:\n os.remove(\"ndarraytest.h5\")\n except OSError:\n pass\n\n for i in range(10):\n if i == 0:\n assert(labelcount[i] == 124)\n else:\n assert(labelcount[i] == 100)\n\ndef test_NDArrayIter_csr():\n # creating toy data\n num_rows = rnd.randint(5, 15)\n num_cols = rnd.randint(1, 20)\n batch_size = rnd.randint(1, num_rows)\n shape = (num_rows, num_cols)\n csr, _ = rand_sparse_ndarray(shape, 'csr')\n dns = csr.asnumpy()\n #test CSRNDArray with shuffle=True will throw NotImplementedError \n try:\n csr_iter = mx.io.NDArrayIter({'data': csr}, dns, batch_size, shuffle=True,\n last_batch_handle='discard')\n assert(False)\n except NotImplementedError:\n pass\n\n # make iterators\n csr_iter = iter(mx.io.NDArrayIter(csr, csr, batch_size, last_batch_handle='discard'))\n begin = 0\n for batch in csr_iter:\n expected = np.zeros((batch_size, num_cols))\n end = begin + batch_size\n expected[:num_rows - begin] = dns[begin:end]\n if end > num_rows:\n expected[num_rows - begin:] = dns[0:end - num_rows]\n assert_almost_equal(batch.data[0].asnumpy(), expected)\n begin += batch_size\n\ndef test_LibSVMIter():\n\n def check_libSVMIter_synthetic():\n cwd = os.getcwd()\n data_path = os.path.join(cwd, 'data.t')\n label_path = os.path.join(cwd, 'label.t')\n with open(data_path, 'w') as fout:\n fout.write('1.0 0:0.5 2:1.2\\n')\n fout.write('-2.0\\n')\n fout.write('-3.0 0:0.6 1:2.4 2:1.2\\n')\n fout.write('4 2:-1.2\\n')\n\n with open(label_path, 'w') as fout:\n fout.write('1.0\\n')\n fout.write('-2.0 0:0.125\\n')\n fout.write('-3.0 2:1.2\\n')\n fout.write('4 1:1.0 2:-1.2\\n')\n\n data_dir = os.path.join(cwd, 'data')\n data_train = mx.io.LibSVMIter(data_libsvm=data_path, label_libsvm=label_path,\n data_shape=(3, ), label_shape=(3, ), batch_size=3)\n\n first = mx.nd.array([[ 0.5, 0., 1.2], [ 0., 0., 0.], [ 0.6, 2.4, 1.2]])\n second = mx.nd.array([[ 0., 0., -1.2], [ 0.5, 0., 1.2], [ 0., 0., 0.]])\n i = 0\n for batch in iter(data_train):\n expected = first.asnumpy() if i == 0 else second.asnumpy()\n assert_almost_equal(data_train.getdata().asnumpy(), expected)\n i += 1\n\n def check_libSVMIter_news_data():\n news_metadata = {\n 'name': 'news20.t',\n 'origin_name': 'news20.t.bz2',\n 'url': \"http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/news20.t.bz2\",\n 'feature_dim': 62060,\n 'num_classes': 20,\n 'num_examples': 3993,\n }\n batch_size = 33\n num_examples = news_metadata['num_examples']\n data_dir = os.path.join(os.getcwd(), 'data')\n get_bz2_data(data_dir, news_metadata['name'], news_metadata['url'],\n news_metadata['origin_name'])\n path = os.path.join(data_dir, news_metadata['name'])\n data_train = mx.io.LibSVMIter(data_libsvm=path, data_shape=(news_metadata['feature_dim'],),\n batch_size=batch_size)\n for epoch in range(2):\n num_batches = 0\n for batch in data_train:\n # check the range of labels\n assert(np.sum(batch.label[0].asnumpy() > 20) == 0)\n 
assert(np.sum(batch.label[0].asnumpy() <= 0) == 0)\n num_batches += 1\n expected_num_batches = num_examples / batch_size\n assert(num_batches == int(expected_num_batches)), num_batches\n data_train.reset()\n\n check_libSVMIter_synthetic()\n check_libSVMIter_news_data()\n \[email protected](\"test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/7826\")\ndef test_CSVIter():\n def check_CSVIter_synthetic():\n cwd = os.getcwd()\n data_path = os.path.join(cwd, 'data.t')\n label_path = os.path.join(cwd, 'label.t')\n with open(data_path, 'w') as fout:\n for i in range(1000):\n fout.write(','.join(['1' for _ in range(8*8)]) + '\\n')\n with open(label_path, 'w') as fout:\n for i in range(1000):\n fout.write('0\\n')\n\n data_train = mx.io.CSVIter(data_csv=data_path, data_shape=(8,8),\n label_csv=label_path, batch_size=100)\n expected = mx.nd.ones((100, 8, 8))\n for batch in iter(data_train):\n assert_almost_equal(data_train.getdata().asnumpy(), expected.asnumpy())\n\n check_CSVIter_synthetic()\n\nif __name__ == \"__main__\":\n test_NDArrayIter()\n if h5py:\n test_NDArrayIter_h5py()\n test_MNISTIter()\n test_Cifar10Rec()\n test_LibSVMIter()\n test_NDArrayIter_csr()\n test_CSVIter()\n"
] | [
[
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
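The tests in the row above lean on `mx.io.NDArrayIter`'s `last_batch_handle='pad'` behavior (1000 samples at batch size 128 yields 8 batches, with the last one padded from the front of the data). A minimal sketch of that behavior, assuming only that `mxnet` and `numpy` are installed; the toy array here is made up:

```python
import numpy as np
import mxnet as mx

# 10 samples with batch_size=4 -> 3 batches; the last batch is padded.
data = np.arange(10).reshape(10, 1)
it = mx.io.NDArrayIter(data, batch_size=4, shuffle=False, last_batch_handle='pad')
for i, batch in enumerate(it):
    # batch.pad reports how many trailing samples in this batch are padding
    print(i, batch.data[0].asnumpy().ravel(), 'pad =', batch.pad)
```

This is why `test_NDArrayIter` above asserts `batchidx == 8` and why the label histogram for the padded class (`i == 0`) comes out at 124 instead of 100.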
flaviofontes29/Machine-Learning-e-Data-Science-com-Python | [
"7b8188b6e7003426ae3a6d46d91d61494135a2b7"
] | [
"Secao 3 - Pre-processamento com Pandas e scikit-learm/template_credit_data.py"
] | [
"import pandas as pd\nimport numpy as np\n\nbase = pd.read_csv('credit_data.csv')\nbase.loc[base.age < 0, 'age'] = 40.92\n \nprevisores = base.iloc[:, 1:4].values\nclasse = base.iloc[:, 4].values\n\nfrom sklearn.impute import SimpleImputer\nimputer = SimpleImputer(missing_values = np.nan, strategy = 'mean')\nimputer = imputer.fit(previsores[:, 1:4])\nprevisores[:, 1:4] = imputer.transform(previsores[:, 1:4])\n\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nprevisores = scaler.fit_transform(previsores)\n\nfrom sklearn.model_selection import train_test_split\nprevisores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores, classe, test_size=0.25, random_state=0)\n"
] | [
[
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"sklearn.impute.SimpleImputer",
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
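One caveat about the template in the row above: it fits `StandardScaler` on the full feature matrix before calling `train_test_split`, so the test split's statistics leak into the scaler. A hedged alternative sketch, assuming scikit-learn >= 0.20 (for `SimpleImputer`) and substituting a tiny synthetic matrix for `credit_data.csv`:

```python
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# Synthetic stand-in for the credit data: one missing value to impute.
X = np.array([[1.0, np.nan], [2.0, 3.0], [3.0, 4.0], [4.0, 5.0]])
y = np.array([0, 1, 0, 1])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)

# Chain imputation and scaling so both are fit on training data only.
prep = Pipeline([
    ('impute', SimpleImputer(missing_values=np.nan, strategy='mean')),
    ('scale', StandardScaler()),
])
X_train = prep.fit_transform(X_train)  # statistics come from the train split
X_test = prep.transform(X_test)        # applied, not re-fit, on the test split
```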
tomvdon/lidar-bonnetal | [
"0bb78eb9a731e98e6f3b893d735b6c3ca96cb0e8"
] | [
"train_test_split.py"
] | [
"import shutil\nimport os\nimport glob\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport shutil\nimport re\n\nclouds = glob.glob('range_images/point_cloud_*.pcd')\ntrain, test = train_test_split(clouds, test_size=0.20, random_state=42)\nfor file in train:\n shutil.copy(file, \"simulated_data/sequences/00\")\n number = re.findall(r'[0-9]+', file)[0]\n label = os.path.join(os.path.sep.join(file.split(os.sep)[:-1]), \"labels\",\n \"label_\" + number + \".npy\")\n shutil.copy(label, \"simulated_data/sequences/00/labels\")\nfor file in test:\n shutil.copy(file, \"simulated_data/sequences/01\")\n number = re.findall(r'[0-9]+', file)[0]\n label = os.path.join(os.path.sep.join(file.split(os.sep)[:-1]), \"labels\",\n \"label_\" + number + \".npy\")\n shutil.copy(label, \"simulated_data/sequences/01/labels\")\n"
] | [
[
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
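Note that the split-and-copy script above assumes `simulated_data/sequences/00/labels` and `.../01/labels` already exist; otherwise `shutil.copy` either raises `FileNotFoundError` or silently writes a file named `00`. A small sketch of the same pattern that creates the destinations first (the file names are illustrative stand-ins, not the repo's actual data):

```python
import os
from sklearn.model_selection import train_test_split

files = [f"point_cloud_{i}.pcd" for i in range(10)]  # stand-in for glob results
train, test = train_test_split(files, test_size=0.20, random_state=42)

# Ensure both sequence folders and their labels subfolders exist before copying.
for split in ("00", "01"):
    os.makedirs(os.path.join("simulated_data", "sequences", split, "labels"), exist_ok=True)
print(len(train), "train files,", len(test), "test files")
```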
arunraja-hub/transformers | [
"76cadb7943c8492ec481f4f3925e9e8793a32c9d",
"3f51e6a35871fefbdfb705902355d7530a72d1b8",
"eb2e006b35938e7b6476d3bfc55343ebfe5ec501"
] | [
"tests/test_modeling_flax_vit.py",
"src/transformers/data/data_collator.py",
"examples/research_projects/wav2vec2/run_asr.py"
] | [
"# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport unittest\n\nimport numpy as np\n\nfrom transformers import ViTConfig, is_flax_available\nfrom transformers.testing_utils import require_flax, slow\n\nfrom .test_configuration_common import ConfigTester\nfrom .test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor\n\n\nif is_flax_available():\n\n import jax\n from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel\n\n\nclass FlaxViTModelTester(unittest.TestCase):\n def __init__(\n self,\n parent,\n batch_size=13,\n image_size=30,\n patch_size=2,\n num_channels=3,\n is_training=True,\n use_labels=True,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n intermediate_size=37,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n type_sequence_label_size=10,\n initializer_range=0.02,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.image_size = image_size\n self.patch_size = patch_size\n self.num_channels = num_channels\n self.is_training = is_training\n self.use_labels = use_labels\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.type_sequence_label_size = type_sequence_label_size\n self.initializer_range = initializer_range\n\n def prepare_config_and_inputs(self):\n pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])\n\n config = ViTConfig(\n image_size=self.image_size,\n patch_size=self.patch_size,\n num_channels=self.num_channels,\n hidden_size=self.hidden_size,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n hidden_act=self.hidden_act,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n is_decoder=False,\n initializer_range=self.initializer_range,\n )\n\n return config, pixel_values\n\n def create_and_check_model(self, config, pixel_values, labels):\n\n model = FlaxViTModel(config=config)\n result = model(pixel_values)\n # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)\n image_size = (self.image_size, self.image_size)\n patch_size = (self.patch_size, self.patch_size)\n num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (\n config,\n pixel_values,\n ) = config_and_inputs\n inputs_dict = {\"pixel_values\": pixel_values}\n return config, 
inputs_dict\n\n\n@require_flax\nclass FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):\n\n all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()\n\n def setUp(self) -> None:\n self.model_tester = FlaxViTModelTester(self)\n self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n # We need to override this test because in ViT, the seq_len equals the number of patches + 1\n # we compute that here\n def test_attention_outputs(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.return_dict = True\n\n num_patches = (config.image_size // config.patch_size) ** 2\n seq_length = num_patches + 1\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_attentions\"] = True\n inputs_dict[\"output_hidden_states\"] = False\n model = model_class(config)\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n attentions = outputs.attentions\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n\n # check that output_attentions also work using config\n del inputs_dict[\"output_attentions\"]\n config.output_attentions = True\n model = model_class(config)\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n attentions = outputs.attentions\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n\n self.assertListEqual(\n list(attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, seq_length, seq_length],\n )\n out_len = len(outputs)\n\n # Check attention is always last and order is fine\n inputs_dict[\"output_attentions\"] = True\n inputs_dict[\"output_hidden_states\"] = True\n model = model_class(config)\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n\n added_hidden_states = 1\n self.assertEqual(out_len + added_hidden_states, len(outputs))\n\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n self.assertListEqual(\n list(attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, seq_length, seq_length],\n )\n\n # We neeed to override this test because ViT's forward signature is different than text models.\n def test_forward_signature(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n signature = inspect.signature(model.__call__)\n # signature.parameters is an OrderedDict => so arg_names order is deterministic\n arg_names = [*signature.parameters.keys()]\n\n expected_arg_names = [\"pixel_values\"]\n self.assertListEqual(arg_names[:1], expected_arg_names)\n\n # We neeed to override this test because ViT expects pixel_values instead of input_ids\n @slow\n def test_jit_compilation(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n with self.subTest(model_class.__name__):\n prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)\n model = model_class(config)\n\n @jax.jit\n def model_jitted(pixel_values, **kwargs):\n return model(pixel_values=pixel_values, **kwargs)\n\n with self.subTest(\"JIT Enabled\"):\n jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()\n\n with self.subTest(\"JIT Disabled\"):\n with jax.disable_jit():\n outputs = model_jitted(**prepared_inputs_dict).to_tuple()\n\n self.assertEqual(len(outputs), 
len(jitted_outputs))\n for jitted_output, output in zip(jitted_outputs, outputs):\n self.assertEqual(jitted_output.shape, output.shape)\n\n # We need to override this test because in ViT, the seq_len equals the number of patches + 1\n # we compute that here\n def test_hidden_states_output(self):\n def check_hidden_states_output(inputs_dict, config, model_class):\n model = model_class(config)\n num_patches = (config.image_size // config.patch_size) ** 2\n seq_length = num_patches + 1 # we add 1 for the [CLS] token\n\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n hidden_states = outputs.hidden_states\n\n self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)\n\n self.assertListEqual(\n list(hidden_states[0].shape[-2:]),\n [seq_length, self.model_tester.hidden_size],\n )\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_hidden_states\"] = True\n check_hidden_states_output(inputs_dict, config, model_class)\n\n # check that output_hidden_states also work using config\n del inputs_dict[\"output_hidden_states\"]\n config.output_hidden_states = True\n\n check_hidden_states_output(inputs_dict, config, model_class)\n\n @slow\n def test_model_from_pretrained(self):\n for model_class_name in self.all_model_classes:\n model = model_class_name.from_pretrained(\"google/vit-base-patch16-224\")\n outputs = model(np.ones((1, 3, 224, 224)))\n self.assertIsNotNone(outputs)\n",
"# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union\n\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom ..file_utils import PaddingStrategy\nfrom ..modeling_utils import PreTrainedModel\nfrom ..models.bert import BertTokenizer, BertTokenizerFast\nfrom ..tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase\n\n\nInputDataClass = NewType(\"InputDataClass\", Any)\n\n\"\"\"\nA DataCollator is a function that takes a list of samples from a Dataset and collate them into a batch, as a dictionary\nof Tensors.\n\"\"\"\nDataCollator = NewType(\"DataCollator\", Callable[[List[InputDataClass]], Dict[str, torch.Tensor]])\n\n\ndef default_data_collator(features: List[InputDataClass]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Very simple data collator that simply collates batches of dict-like objects and performs special handling for\n potential keys named:\n\n - ``label``: handles a single value (int or float) per object\n - ``label_ids``: handles a list of values per object\n\n Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs\n to the model. 
See glue and ner for example of how it's useful.\n \"\"\"\n\n # In this function we'll make the assumption that all `features` in the batch\n # have the same attributes.\n # So we will look at the first element as a proxy for what attributes exist\n # on the whole batch.\n if not isinstance(features[0], (dict, BatchEncoding)):\n features = [vars(f) for f in features]\n\n first = features[0]\n batch = {}\n\n # Special handling for labels.\n # Ensure that tensor is created with the correct type\n # (it should be automatically the case, but let's make sure of it.)\n if \"label\" in first and first[\"label\"] is not None:\n label = first[\"label\"].item() if isinstance(first[\"label\"], torch.Tensor) else first[\"label\"]\n dtype = torch.long if isinstance(label, int) else torch.float\n batch[\"labels\"] = torch.tensor([f[\"label\"] for f in features], dtype=dtype)\n elif \"label_ids\" in first and first[\"label_ids\"] is not None:\n if isinstance(first[\"label_ids\"], torch.Tensor):\n batch[\"labels\"] = torch.stack([f[\"label_ids\"] for f in features])\n else:\n dtype = torch.long if type(first[\"label_ids\"][0]) is int else torch.float\n batch[\"labels\"] = torch.tensor([f[\"label_ids\"] for f in features], dtype=dtype)\n\n # Handling of all other possible keys.\n # Again, we will use the first element to figure out which key/values are not None for this model.\n for k, v in first.items():\n if k not in (\"label\", \"label_ids\") and v is not None and not isinstance(v, str):\n if isinstance(v, torch.Tensor):\n batch[k] = torch.stack([f[k] for f in features])\n else:\n batch[k] = torch.tensor([f[k] for f in features])\n\n return batch\n\n\n@dataclass\nclass DataCollatorWithPadding:\n \"\"\"\n Data collator that will dynamically pad the inputs received.\n\n Args:\n tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):\n The tokenizer used for encoding the data.\n padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the\n maximum acceptable input length for the model if that argument is not provided.\n * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of\n different lengths).\n max_length (:obj:`int`, `optional`):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (:obj:`int`, `optional`):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.5 (Volta).\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n padding: Union[bool, str, PaddingStrategy] = True\n max_length: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n\n def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:\n batch = self.tokenizer.pad(\n features,\n padding=self.padding,\n max_length=self.max_length,\n pad_to_multiple_of=self.pad_to_multiple_of,\n return_tensors=\"pt\",\n )\n if \"label\" in batch:\n batch[\"labels\"] = batch[\"label\"]\n del batch[\"label\"]\n if 
\"label_ids\" in batch:\n batch[\"labels\"] = batch[\"label_ids\"]\n del batch[\"label_ids\"]\n return batch\n\n\n@dataclass\nclass DataCollatorForTokenClassification:\n \"\"\"\n Data collator that will dynamically pad the inputs received, as well as the labels.\n\n Args:\n tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):\n The tokenizer used for encoding the data.\n padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the\n maximum acceptable input length for the model if that argument is not provided.\n * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of\n different lengths).\n max_length (:obj:`int`, `optional`):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (:obj:`int`, `optional`):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.5 (Volta).\n label_pad_token_id (:obj:`int`, `optional`, defaults to -100):\n The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n padding: Union[bool, str, PaddingStrategy] = True\n max_length: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n label_pad_token_id: int = -100\n\n def __call__(self, features):\n label_name = \"label\" if \"label\" in features[0].keys() else \"labels\"\n labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None\n batch = self.tokenizer.pad(\n features,\n padding=self.padding,\n max_length=self.max_length,\n pad_to_multiple_of=self.pad_to_multiple_of,\n # Conversion to tensors will fail if we have labels as they are not of the same length yet.\n return_tensors=\"pt\" if labels is None else None,\n )\n\n if labels is None:\n return batch\n\n sequence_length = torch.tensor(batch[\"input_ids\"]).shape[1]\n padding_side = self.tokenizer.padding_side\n if padding_side == \"right\":\n batch[\"labels\"] = [label + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels]\n else:\n batch[\"labels\"] = [[self.label_pad_token_id] * (sequence_length - len(label)) + label for label in labels]\n\n batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n return batch\n\n\ndef _collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):\n \"\"\"Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary.\"\"\"\n # Tensorize if necessary.\n if isinstance(examples[0], (list, tuple)):\n examples = [torch.tensor(e, dtype=torch.long) for e in examples]\n\n # Check if padding is necessary.\n length_of_first = examples[0].size(0)\n are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)\n if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):\n return torch.stack(examples, dim=0)\n\n # If yes, check if we have a 
`pad_token`.\n if tokenizer._pad_token is None:\n raise ValueError(\n \"You are attempting to pad samples but the tokenizer you are using\"\n f\" ({tokenizer.__class__.__name__}) does not have a pad token.\"\n )\n\n # Creating the full tensor and filling it with our data.\n max_length = max(x.size(0) for x in examples)\n if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of\n result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)\n for i, example in enumerate(examples):\n if tokenizer.padding_side == \"right\":\n result[i, : example.shape[0]] = example\n else:\n result[i, -example.shape[0] :] = example\n return result\n\n\ndef tolist(x: Union[List[Any], torch.Tensor]):\n return x.tolist() if isinstance(x, torch.Tensor) else x\n\n\n@dataclass\nclass DataCollatorForSeq2Seq:\n \"\"\"\n Data collator that will dynamically pad the inputs received, as well as the labels.\n\n Args:\n tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):\n The tokenizer used for encoding the data.\n model (:class:`~transformers.PreTrainedModel`):\n The model that is being trained. If set and has the `prepare_decoder_input_ids_from_labels`, use it to\n prepare the `decoder_input_ids`\n\n This is useful when using `label_smoothing` to avoid calculating loss twice.\n padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence is provided).\n * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the\n maximum acceptable input length for the model if that argument is not provided.\n * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of\n different lengths).\n max_length (:obj:`int`, `optional`):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (:obj:`int`, `optional`):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.5 (Volta).\n label_pad_token_id (:obj:`int`, `optional`, defaults to -100):\n The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n model: Optional[PreTrainedModel] = None\n padding: Union[bool, str, PaddingStrategy] = True\n max_length: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n label_pad_token_id: int = -100\n\n def __call__(self, features):\n labels = [feature[\"labels\"] for feature in features] if \"labels\" in features[0].keys() else None\n # We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the\n # same length to return tensors.\n if labels is not None:\n max_label_length = max(len(l) for l in labels)\n padding_side = self.tokenizer.padding_side\n for feature in features:\n remainder = [self.label_pad_token_id] * (max_label_length - len(feature[\"labels\"]))\n feature[\"labels\"] = (\n feature[\"labels\"] + remainder if padding_side == \"right\" else remainder + 
feature[\"labels\"]\n )\n\n features = self.tokenizer.pad(\n features,\n padding=self.padding,\n max_length=self.max_length,\n pad_to_multiple_of=self.pad_to_multiple_of,\n return_tensors=\"pt\",\n )\n\n # prepare decoder_input_ids\n if self.model is not None and hasattr(self.model, \"prepare_decoder_input_ids_from_labels\"):\n decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features[\"labels\"])\n features[\"decoder_input_ids\"] = decoder_input_ids\n\n return features\n\n\n@dataclass\nclass DataCollatorForLanguageModeling:\n \"\"\"\n Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they\n are not all of the same length.\n\n Args:\n tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):\n The tokenizer used for encoding the data.\n mlm (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to use masked language modeling. If set to :obj:`False`, the labels are the same as the\n inputs with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for\n non-masked tokens and the value to predict for the masked token.\n mlm_probability (:obj:`float`, `optional`, defaults to 0.15):\n The probability with which to (randomly) mask tokens in the input, when :obj:`mlm` is set to :obj:`True`.\n pad_to_multiple_of (:obj:`int`, `optional`):\n If set will pad the sequence to a multiple of the provided value.\n\n .. note::\n\n For best performance, this data collator should be used with a dataset having items that are dictionaries or\n BatchEncoding, with the :obj:`\"special_tokens_mask\"` key, as returned by a\n :class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the\n argument :obj:`return_special_tokens_mask=True`.\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n mlm: bool = True\n mlm_probability: float = 0.15\n pad_to_multiple_of: Optional[int] = None\n\n def __post_init__(self):\n if self.mlm and self.tokenizer.mask_token is None:\n raise ValueError(\n \"This tokenizer does not have a mask token which is necessary for masked language modeling. 
\"\n \"You should pass `mlm=False` to train on causal language modeling instead.\"\n )\n\n def __call__(\n self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]\n ) -> Dict[str, torch.Tensor]:\n # Handle dict or lists with proper padding and conversion to tensor.\n if isinstance(examples[0], (dict, BatchEncoding)):\n batch = self.tokenizer.pad(examples, return_tensors=\"pt\", pad_to_multiple_of=self.pad_to_multiple_of)\n else:\n batch = {\"input_ids\": _collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)}\n\n # If special token mask has been preprocessed, pop it from the dict.\n special_tokens_mask = batch.pop(\"special_tokens_mask\", None)\n if self.mlm:\n batch[\"input_ids\"], batch[\"labels\"] = self.mask_tokens(\n batch[\"input_ids\"], special_tokens_mask=special_tokens_mask\n )\n else:\n labels = batch[\"input_ids\"].clone()\n if self.tokenizer.pad_token_id is not None:\n labels[labels == self.tokenizer.pad_token_id] = -100\n batch[\"labels\"] = labels\n return batch\n\n def mask_tokens(\n self, inputs: torch.Tensor, special_tokens_mask: Optional[torch.Tensor] = None\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.\n \"\"\"\n labels = inputs.clone()\n # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)\n probability_matrix = torch.full(labels.shape, self.mlm_probability)\n if special_tokens_mask is None:\n special_tokens_mask = [\n self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()\n ]\n special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)\n else:\n special_tokens_mask = special_tokens_mask.bool()\n\n probability_matrix.masked_fill_(special_tokens_mask, value=0.0)\n masked_indices = torch.bernoulli(probability_matrix).bool()\n labels[~masked_indices] = -100 # We only compute loss on masked tokens\n\n # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices\n inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced\n random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)\n inputs[indices_random] = random_words[indices_random]\n\n # The rest of the time (10% of the time) we keep the masked input tokens unchanged\n return inputs, labels\n\n\n@dataclass\nclass DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):\n \"\"\"\n Data collator used for language modeling that masks entire words.\n\n - collates batches of tensors, honoring their tokenizer's pad_token\n - preprocesses batches for masked language modeling\n\n .. note::\n\n This collator relies on details of the implementation of subword tokenization by\n :class:`~transformers.BertTokenizer`, specifically that subword tokens are prefixed with `##`. 
For tokenizers\n that do not adhere to this scheme, this collator will produce an output that is roughly equivalent to\n :class:`.DataCollatorForLanguageModeling`.\n \"\"\"\n\n def __call__(\n self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]\n ) -> Dict[str, torch.Tensor]:\n if isinstance(examples[0], (dict, BatchEncoding)):\n input_ids = [e[\"input_ids\"] for e in examples]\n else:\n input_ids = examples\n examples = [{\"input_ids\": e} for e in examples]\n\n batch_input = _collate_batch(input_ids, self.tokenizer)\n\n mask_labels = []\n for e in examples:\n ref_tokens = []\n for id in tolist(e[\"input_ids\"]):\n token = self.tokenizer._convert_id_to_token(id)\n ref_tokens.append(token)\n\n # For Chinese tokens, we need extra inf to mark sub-word, e.g [喜,欢]-> [喜,##欢]\n if \"chinese_ref\" in e:\n ref_pos = tolist(e[\"chinese_ref\"])\n len_seq = len(e[\"input_ids\"])\n for i in range(len_seq):\n if i in ref_pos:\n ref_tokens[i] = \"##\" + ref_tokens[i]\n mask_labels.append(self._whole_word_mask(ref_tokens))\n batch_mask = _collate_batch(mask_labels, self.tokenizer)\n inputs, labels = self.mask_tokens(batch_input, batch_mask)\n return {\"input_ids\": inputs, \"labels\": labels}\n\n def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):\n \"\"\"\n Get 0/1 labels for masked tokens with whole word mask proxy\n \"\"\"\n if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):\n warnings.warn(\n \"DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers.\"\n \"Please refer to the documentation for more information.\"\n )\n\n cand_indexes = []\n for (i, token) in enumerate(input_tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n continue\n\n if len(cand_indexes) >= 1 and token.startswith(\"##\"):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n random.shuffle(cand_indexes)\n num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n masked_lms.append(index)\n\n assert len(covered_indexes) == len(masked_lms)\n mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]\n return mask_labels\n\n def mask_tokens(self, inputs: torch.Tensor, mask_labels: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set\n 'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref.\n \"\"\"\n\n if self.tokenizer.mask_token is None:\n raise ValueError(\n \"This tokenizer does not have a mask token which is necessary for masked language modeling. 
Remove the --mlm flag if you want to use this tokenizer.\"\n )\n labels = inputs.clone()\n # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)\n\n probability_matrix = mask_labels\n\n special_tokens_mask = [\n self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()\n ]\n probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)\n if self.tokenizer._pad_token is not None:\n padding_mask = labels.eq(self.tokenizer.pad_token_id)\n probability_matrix.masked_fill_(padding_mask, value=0.0)\n\n masked_indices = probability_matrix.bool()\n labels[~masked_indices] = -100 # We only compute loss on masked tokens\n\n # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices\n inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced\n random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)\n inputs[indices_random] = random_words[indices_random]\n\n # The rest of the time (10% of the time) we keep the masked input tokens unchanged\n return inputs, labels\n\n\n@dataclass\nclass DataCollatorForSOP(DataCollatorForLanguageModeling):\n \"\"\"\n Data collator used for sentence order prediction task.\n\n - collates batches of tensors, honoring their tokenizer's pad_token\n - preprocesses batches for both masked language modeling and sentence order prediction\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n warnings.warn(\n \"DataCollatorForSOP is deprecated and will be removed in a future version, you can now use \"\n \"DataCollatorForLanguageModeling instead.\",\n FutureWarning,\n )\n\n def __call__(self, examples: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:\n input_ids = [example[\"input_ids\"] for example in examples]\n input_ids = _collate_batch(input_ids, self.tokenizer)\n input_ids, labels, attention_mask = self.mask_tokens(input_ids)\n\n token_type_ids = [example[\"token_type_ids\"] for example in examples]\n # size of segment_ids varied because randomness, padding zero to the end as the original implementation\n token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)\n\n sop_label_list = [example[\"sentence_order_label\"] for example in examples]\n sentence_order_label = torch.stack(sop_label_list)\n\n return {\n \"input_ids\": input_ids,\n \"labels\": labels,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n \"sentence_order_label\": sentence_order_label,\n }\n\n def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%\n original. N-gram not applied yet.\n \"\"\"\n if self.tokenizer.mask_token is None:\n raise ValueError(\n \"This tokenizer does not have a mask token which is necessary for masked language modeling. 
Remove the --mlm flag if you want to use this tokenizer.\"\n )\n\n labels = inputs.clone()\n # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)\n probability_matrix = torch.full(labels.shape, self.mlm_probability)\n special_tokens_mask = [\n self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()\n ]\n probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)\n if self.tokenizer._pad_token is not None:\n padding_mask = labels.eq(self.tokenizer.pad_token_id)\n probability_matrix.masked_fill_(padding_mask, value=0.0)\n masked_indices = torch.bernoulli(probability_matrix).bool()\n # probability be `1` (masked), however in albert model attention mask `0` means masked, revert the value\n attention_mask = (~masked_indices).float()\n if self.tokenizer._pad_token is not None:\n attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)\n attention_mask.masked_fill_(attention_padding_mask, value=1.0)\n labels[~masked_indices] = -100 # We only compute loss on masked tokens, -100 is default for CE compute\n\n # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices\n inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced\n random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)\n inputs[indices_random] = random_words[indices_random]\n\n # The rest of the time (10% of the time) we keep the masked input tokens unchanged\n return inputs, labels, attention_mask\n\n\n@dataclass\nclass DataCollatorForPermutationLanguageModeling:\n \"\"\"\n Data collator used for permutation language modeling.\n\n - collates batches of tensors, honoring their tokenizer's pad_token\n - preprocesses batches for permutation language modeling with procedures specific to XLNet\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n plm_probability: float = 1 / 6\n max_span_length: int = 5 # maximum length of a span of masked tokens\n\n def __call__(\n self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]\n ) -> Dict[str, torch.Tensor]:\n if isinstance(examples[0], (dict, BatchEncoding)):\n examples = [e[\"input_ids\"] for e in examples]\n batch = _collate_batch(examples, self.tokenizer)\n inputs, perm_mask, target_mapping, labels = self.mask_tokens(batch)\n return {\"input_ids\": inputs, \"perm_mask\": perm_mask, \"target_mapping\": target_mapping, \"labels\": labels}\n\n def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n The masked tokens to be predicted for a particular sequence are determined by the following algorithm:\n\n 0. Start from the beginning of the sequence by setting ``cur_len = 0`` (number of tokens processed so far).\n 1. Sample a ``span_length`` from the interval ``[1, max_span_length]`` (length of span of tokens to be\n masked)\n 2. Reserve a context of length ``context_length = span_length / plm_probability`` to surround span to be\n masked\n 3. 
Sample a starting point ``start_index`` from the interval ``[cur_len, cur_len + context_length -\n span_length]`` and mask tokens ``start_index:start_index + span_length``\n 4. Set ``cur_len = cur_len + context_length``. If ``cur_len < max_len`` (i.e. there are tokens remaining in\n the sequence to be processed), repeat from Step 1.\n \"\"\"\n\n if self.tokenizer.mask_token is None:\n raise ValueError(\n \"This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer.\"\n )\n\n if inputs.size(1) % 2 != 0:\n raise ValueError(\n \"This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details.\"\n )\n\n labels = inputs.clone()\n # Creating the mask and target_mapping tensors\n masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)\n target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)\n\n for i in range(labels.size(0)):\n # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).\n cur_len = 0\n max_len = labels.size(1)\n\n while cur_len < max_len:\n # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)\n span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()\n # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked\n context_length = int(span_length / self.plm_probability)\n # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`\n start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()\n masked_indices[i, start_index : start_index + span_length] = 1\n # Set `cur_len = cur_len + context_length`\n cur_len += context_length\n\n # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,\n # the i-th predict corresponds to the i-th token.\n target_mapping[i] = torch.eye(labels.size(1))\n\n special_tokens_mask = torch.tensor(\n [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],\n dtype=torch.bool,\n )\n masked_indices.masked_fill_(special_tokens_mask, value=0.0)\n if self.tokenizer._pad_token is not None:\n padding_mask = labels.eq(self.tokenizer.pad_token_id)\n masked_indices.masked_fill_(padding_mask, value=0.0)\n\n # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.\n non_func_mask = ~(padding_mask | special_tokens_mask)\n\n inputs[masked_indices] = self.tokenizer.mask_token_id\n labels[~masked_indices] = -100 # We only compute loss on masked tokens\n\n perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)\n\n for i in range(labels.size(0)):\n # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will\n # determine which tokens a given token can attend to (encoded in `perm_mask`).\n # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length\n # (see documentation for `mems`), otherwise information may leak through due to reuse. 
In this implementation,\n # we assume that reused length is half of sequence length and permutation length is equal to reused length.\n # This requires that the sequence length be even.\n\n # Create a linear factorisation order\n perm_index = torch.arange(labels.size(1))\n # Split this into two halves, assuming that half the sequence is reused each time\n perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)\n # Permute the two halves such that they do not cross over\n perm_index = perm_index[torch.randperm(labels.size(1) // 2)]\n # Flatten this out into the desired permuted factorisation order\n perm_index = torch.flatten(perm_index.transpose(0, 1))\n # Set the permutation indices of non-masked (non-functional) tokens to the\n # smallest index (-1) so that:\n # (1) They can be seen by all other positions\n # (2) They cannot see masked positions, so there won't be information leak\n perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)\n # The logic for whether the i-th token can attend on the j-th token based on the factorisation order:\n # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token\n # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token\n perm_mask[i] = (\n perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))\n ) & masked_indices[i]\n\n return inputs.long(), perm_mask, target_mapping, labels.long()\n",
"#!/usr/bin/env python3\nimport logging\nimport pathlib\nimport re\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import Any, Callable, Dict, List, Optional, Set, Union\n\nimport datasets\nimport numpy as np\nimport torch\nfrom packaging import version\nfrom torch import nn\n\nimport librosa\nfrom lang_trans import arabic\nfrom transformers import (\n HfArgumentParser,\n Trainer,\n TrainingArguments,\n Wav2Vec2CTCTokenizer,\n Wav2Vec2FeatureExtractor,\n Wav2Vec2ForCTC,\n Wav2Vec2Processor,\n is_apex_available,\n trainer_utils,\n)\n\n\nif is_apex_available():\n from apex import amp\n\nif version.parse(torch.__version__) >= version.parse(\"1.6\"):\n _is_native_amp_available = True\n from torch.cuda.amp import autocast\n\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where do you want to store the pretrained models downloaded from huggingface.co\"},\n )\n freeze_feature_extractor: Optional[bool] = field(\n default=True, metadata={\"help\": \"Whether to freeze the feature extractor layers of the model.\"}\n )\n gradient_checkpointing: Optional[bool] = field(\n default=False, metadata={\"help\": \"Whether to freeze the feature extractor layers of the model.\"}\n )\n verbose_logging: Optional[bool] = field(\n default=False,\n metadata={\"help\": \"Whether to log verbose messages or not.\"},\n )\n\n\ndef configure_logger(model_args: ModelArguments, training_args: TrainingArguments):\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n logging_level = logging.WARNING\n if model_args.verbose_logging:\n logging_level = logging.DEBUG\n elif trainer_utils.is_main_process(training_args.local_rank):\n logging_level = logging.INFO\n logger.setLevel(logging_level)\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n\n Using `HfArgumentParser` we can turn this class\n into argparse arguments to be able to specify them on\n the command line.\n \"\"\"\n\n dataset_name: str = field(\n default=None, metadata={\"help\": \"The name of the dataset to use (via the datasets library).\"}\n )\n dataset_config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The configuration name of the dataset to use (via the datasets library).\"}\n )\n train_split_name: Optional[str] = field(\n default=\"train\",\n metadata={\n \"help\": \"The name of the training data set split to use (via the datasets library). Defaults to 'train'\"\n },\n )\n validation_split_name: Optional[str] = field(\n default=\"validation\",\n metadata={\n \"help\": \"The name of the validation data set split to use (via the datasets library). Defaults to 'validation'\"\n },\n )\n target_text_column: Optional[str] = field(\n default=\"text\",\n metadata={\"help\": \"Column in the dataset that contains label (target text). Defaults to 'text'\"},\n )\n speech_file_column: Optional[str] = field(\n default=\"file\",\n metadata={\"help\": \"Column in the dataset that contains speech file path. 
Defaults to 'file'\"},\n )\n target_feature_extractor_sampling_rate: Optional[bool] = field(\n default=False,\n metadata={\"help\": \"Resample loaded audio to target feature extractor's sampling rate or not.\"},\n )\n max_duration_in_seconds: Optional[float] = field(\n default=None,\n metadata={\"help\": \"Filters out examples longer than specified. Defaults to no filtering.\"},\n )\n orthography: Optional[str] = field(\n default=\"librispeech\",\n metadata={\n \"help\": \"Orthography used for normalization and tokenization: 'librispeech' (default), 'timit', or 'buckwalter'.\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached preprocessed datasets or not.\"}\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of processes to use for the preprocessing.\"},\n )\n\n\n@dataclass\nclass Orthography:\n \"\"\"\n Orthography scheme used for text normalization and tokenization.\n\n Args:\n do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to accept lowercase input and lowercase the output when decoding.\n vocab_file (:obj:`str`, `optional`):\n File containing the vocabulary.\n word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`\"|\"`):\n The token used for delimiting words; it needs to be in the vocabulary.\n translation_table (:obj:`Dict[str, str]`, `optional`, defaults to :obj:`{}`):\n Table to use with `str.translate()` when preprocessing text (e.g., \"-\" -> \" \").\n words_to_remove (:obj:`Set[str]`, `optional`, defaults to :obj:`set()`):\n Words to remove when preprocessing text (e.g., \"sil\").\n untransliterator (:obj:`Callable[[str], str]`, `optional`):\n Function that untransliterates text back into native writing system.\n \"\"\"\n\n do_lower_case: bool = False\n vocab_file: Optional[str] = None\n word_delimiter_token: Optional[str] = \"|\"\n translation_table: Optional[Dict[str, str]] = field(default_factory=dict)\n words_to_remove: Optional[Set[str]] = field(default_factory=set)\n untransliterator: Optional[Callable[[str], str]] = None\n\n @classmethod\n def from_name(cls, name: str):\n if name == \"librispeech\":\n return cls()\n if name == \"timit\":\n return cls(\n do_lower_case=True,\n # break compounds like \"quarter-century-old\" and replace pauses \"--\"\n translation_table=str.maketrans({\"-\": \" \"}),\n )\n if name == \"buckwalter\":\n translation_table = {\n \"-\": \" \", # sometimes used to represent pauses\n \"^\": \"v\", # fixing \"tha\" in arabic_speech_corpus dataset\n }\n return cls(\n vocab_file=pathlib.Path(__file__).parent.joinpath(\"vocab/buckwalter.json\"),\n word_delimiter_token=\"/\", # \"|\" is Arabic letter alef with madda above\n translation_table=str.maketrans(translation_table),\n words_to_remove={\"sil\"}, # fixing \"sil\" in arabic_speech_corpus dataset\n untransliterator=arabic.buckwalter.untransliterate,\n )\n raise ValueError(f\"Unsupported orthography: '{name}'.\")\n\n def preprocess_for_training(self, text: str) -> str:\n # TODO(elgeish) return a pipeline (e.g., from jiwer) instead? 
Or rely on branch predictor as is\n if len(self.translation_table) > 0:\n text = text.translate(self.translation_table)\n if len(self.words_to_remove) == 0:\n text = \" \".join(text.split()) # clean up whitespaces\n else:\n text = \" \".join(w for w in text.split() if w not in self.words_to_remove) # and clean up whilespaces\n return text\n\n def create_processor(self, model_args: ModelArguments) -> Wav2Vec2Processor:\n feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(\n model_args.model_name_or_path, cache_dir=model_args.cache_dir\n )\n if self.vocab_file:\n tokenizer = Wav2Vec2CTCTokenizer(\n self.vocab_file,\n cache_dir=model_args.cache_dir,\n do_lower_case=self.do_lower_case,\n word_delimiter_token=self.word_delimiter_token,\n )\n else:\n tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(\n model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n do_lower_case=self.do_lower_case,\n word_delimiter_token=self.word_delimiter_token,\n )\n return Wav2Vec2Processor(feature_extractor, tokenizer)\n\n\n@dataclass\nclass DataCollatorCTCWithPadding:\n \"\"\"\n Data collator that will dynamically pad the inputs received.\n Args:\n processor (:class:`~transformers.Wav2Vec2Processor`)\n The processor used for proccessing the data.\n padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the\n maximum acceptable input length for the model if that argument is not provided.\n * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of\n different lengths).\n max_length (:obj:`int`, `optional`):\n Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).\n max_length_labels (:obj:`int`, `optional`):\n Maximum length of the ``labels`` returned list and optionally padding length (see above).\n pad_to_multiple_of (:obj:`int`, `optional`):\n If set will pad the sequence to a multiple of the provided value.\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.5 (Volta).\n \"\"\"\n\n processor: Wav2Vec2Processor\n padding: Union[bool, str] = True\n max_length: Optional[int] = None\n max_length_labels: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n pad_to_multiple_of_labels: Optional[int] = None\n\n def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:\n # split inputs and labels since they have to be of different lenghts and need\n # different padding methods\n input_features = [{\"input_values\": feature[\"input_values\"]} for feature in features]\n label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features]\n\n batch = self.processor.pad(\n input_features,\n padding=self.padding,\n max_length=self.max_length,\n pad_to_multiple_of=self.pad_to_multiple_of,\n return_tensors=\"pt\",\n )\n with self.processor.as_target_processor():\n labels_batch = self.processor.pad(\n label_features,\n padding=self.padding,\n max_length=self.max_length_labels,\n pad_to_multiple_of=self.pad_to_multiple_of_labels,\n return_tensors=\"pt\",\n )\n\n 
# replace padding with -100 to ignore loss correctly\n labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100)\n\n batch[\"labels\"] = labels\n\n return batch\n\n\nclass CTCTrainer(Trainer):\n def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:\n \"\"\"\n Perform a training step on a batch of inputs.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (:obj:`nn.Module`):\n The model to train.\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument :obj:`labels`. Check your model's documentation for all accepted arguments.\n\n Return:\n :obj:`torch.Tensor`: The tensor with training loss on this batch.\n \"\"\"\n\n model.train()\n inputs = self._prepare_inputs(inputs)\n\n if self.use_amp:\n with autocast():\n loss = self.compute_loss(model, inputs)\n else:\n loss = self.compute_loss(model, inputs)\n\n if self.args.n_gpu > 1:\n if model.module.config.ctc_loss_reduction == \"mean\":\n loss = loss.mean()\n elif model.module.config.ctc_loss_reduction == \"sum\":\n loss = loss.sum() / (inputs[\"labels\"] >= 0).sum()\n else:\n raise ValueError(f\"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']\")\n\n if self.args.gradient_accumulation_steps > 1:\n loss = loss / self.args.gradient_accumulation_steps\n\n if self.use_amp:\n self.scaler.scale(loss).backward()\n elif self.use_apex:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n elif self.deepspeed:\n self.deepspeed.backward(loss)\n else:\n loss.backward()\n\n return loss.detach()\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))\n\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n configure_logger(model_args, training_args)\n\n orthography = Orthography.from_name(data_args.orthography.lower())\n processor = orthography.create_processor(model_args)\n model = Wav2Vec2ForCTC.from_pretrained(\n model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n gradient_checkpointing=model_args.gradient_checkpointing,\n vocab_size=len(processor.tokenizer),\n )\n\n train_dataset = datasets.load_dataset(\n data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name\n )\n val_dataset = datasets.load_dataset(\n data_args.dataset_name, data_args.dataset_config_name, split=data_args.validation_split_name\n )\n\n wer_metric = datasets.load_metric(\"wer\")\n target_sr = processor.feature_extractor.sampling_rate if data_args.target_feature_extractor_sampling_rate else None\n vocabulary_chars_str = \"\".join(t for t in processor.tokenizer.get_vocab().keys() if len(t) == 1)\n vocabulary_text_cleaner = re.compile( # remove characters not in vocabulary\n f\"[^\\s{re.escape(vocabulary_chars_str)}]\", # allow space in addition to chars in vocabulary\n flags=re.IGNORECASE if processor.tokenizer.do_lower_case else 0,\n )\n text_updates = []\n\n def prepare_example(example): # TODO(elgeish) make use of multiprocessing?\n example[\"speech\"], example[\"sampling_rate\"] = librosa.load(example[data_args.speech_file_column], sr=target_sr)\n if 
data_args.max_duration_in_seconds is not None:\n example[\"duration_in_seconds\"] = len(example[\"speech\"]) / example[\"sampling_rate\"]\n # Normalize and clean up text; order matters!\n updated_text = orthography.preprocess_for_training(example[data_args.target_text_column])\n updated_text = vocabulary_text_cleaner.sub(\"\", updated_text)\n if updated_text != example[data_args.target_text_column]:\n text_updates.append((example[data_args.target_text_column], updated_text))\n example[data_args.target_text_column] = updated_text\n return example\n\n train_dataset = train_dataset.map(prepare_example, remove_columns=[data_args.speech_file_column])\n val_dataset = val_dataset.map(prepare_example, remove_columns=[data_args.speech_file_column])\n\n if data_args.max_duration_in_seconds is not None:\n\n def filter_by_max_duration(example):\n return example[\"duration_in_seconds\"] <= data_args.max_duration_in_seconds\n\n old_train_size = len(train_dataset)\n old_val_size = len(val_dataset)\n train_dataset = train_dataset.filter(filter_by_max_duration, remove_columns=[\"duration_in_seconds\"])\n val_dataset = val_dataset.filter(filter_by_max_duration, remove_columns=[\"duration_in_seconds\"])\n if len(train_dataset) > old_train_size:\n logger.warning(\n f\"Filtered out {len(train_dataset) - old_train_size} train example(s) longer than {data_args.max_duration_in_seconds} second(s).\"\n )\n if len(val_dataset) > old_val_size:\n logger.warning(\n f\"Filtered out {len(val_dataset) - old_val_size} validation example(s) longer than {data_args.max_duration_in_seconds} second(s).\"\n )\n logger.info(f\"Split sizes: {len(train_dataset)} train and {len(val_dataset)} validation.\")\n\n logger.warning(f\"Updated {len(text_updates)} transcript(s) using '{data_args.orthography}' orthography rules.\")\n if logger.isEnabledFor(logging.DEBUG):\n for original_text, updated_text in text_updates:\n logger.debug(f'Updated text: \"{original_text}\" -> \"{updated_text}\"')\n text_updates = None\n\n def prepare_dataset(batch):\n # check that all files have the correct sampling rate\n assert (\n len(set(batch[\"sampling_rate\"])) == 1\n ), f\"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.\"\n\n batch[\"input_values\"] = processor(batch[\"speech\"], sampling_rate=batch[\"sampling_rate\"][0]).input_values\n with processor.as_target_processor():\n batch[\"labels\"] = processor(batch[data_args.target_text_column]).input_ids\n return batch\n\n train_dataset = train_dataset.map(\n prepare_dataset,\n batch_size=training_args.per_device_train_batch_size,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n )\n val_dataset = val_dataset.map(\n prepare_dataset,\n batch_size=training_args.per_device_train_batch_size,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n )\n\n data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)\n\n def compute_metrics(pred):\n pred_logits = pred.predictions\n pred_ids = np.argmax(pred_logits, axis=-1)\n\n pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id\n\n pred_str = processor.batch_decode(pred_ids)\n # we do not want to group tokens when computing the metrics\n label_str = processor.batch_decode(pred.label_ids, group_tokens=False)\n if logger.isEnabledFor(logging.DEBUG):\n for reference, predicted in zip(label_str, pred_str):\n logger.debug(f'reference: \"{reference}\"')\n logger.debug(f'predicted: \"{predicted}\"')\n if orthography.untransliterator is not None:\n 
logger.debug(f'reference (untransliterated): \"{orthography.untransliterator(reference)}\"')\n logger.debug(f'predicted (untransliterated): \"{orthography.untransliterator(predicted)}\"')\n\n wer = wer_metric.compute(predictions=pred_str, references=label_str)\n\n return {\"wer\": wer}\n\n if model_args.freeze_feature_extractor:\n model.freeze_feature_extractor()\n\n trainer = CTCTrainer(\n model=model,\n data_collator=data_collator,\n args=training_args,\n compute_metrics=compute_metrics,\n train_dataset=train_dataset,\n eval_dataset=val_dataset,\n tokenizer=processor.feature_extractor,\n )\n\n trainer.train()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.ones"
],
[
"torch.randint",
"torch.full",
"torch.nn.utils.rnn.pad_sequence",
"torch.tensor",
"torch.bernoulli",
"torch.stack"
],
[
"numpy.argmax",
"torch.cuda.amp.autocast"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yamamon75/PmagPy | [
"fa5b189800a239683fc17c6b312cdfdd839a46c3",
"fa5b189800a239683fc17c6b312cdfdd839a46c3",
"fa5b189800a239683fc17c6b312cdfdd839a46c3"
] | [
"pmagpy/controlled_vocabularies2.py",
"programs/incfish.py",
"programs/zeq_magic.py"
] | [
"#!/usr/bin/env python\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport json\nimport os\nfrom builtins import object\n\nimport pandas as pd\nfrom pandas import Series\nfrom . import find_pmag_dir\n\npmag_dir = find_pmag_dir.get_pmag_dir()\ndata_model_dir = os.path.join(pmag_dir, 'pmagpy', 'data_model')\n# if using with py2app, the directory structure is flat,\n# so check to see where the resource actually is\nif not os.path.exists(data_model_dir):\n data_model_dir = os.path.join(pmag_dir, 'data_model')\n\n\nclass Vocabulary(object):\n\n def __init__(self):\n self.vocabularies = []\n self.possible_vocabularies = []\n self.all_codes = []\n self.code_types = []\n self.er_methods = []\n self.pmag_methods = []\n self.age_methods = []\n\n def get_one_meth_type(self, mtype, method_list):\n \"\"\"\n Get all codes of one type (i.e., 'anisotropy_estimation')\n \"\"\"\n cond = method_list['dtype'] == mtype\n codes = method_list[cond]\n return codes\n\n def get_one_meth_category(self, category, all_codes, code_types):\n \"\"\"\n Get all codes in one category (i.e., all pmag codes).\n This can include multiple method types (i.e., 'anisotropy_estimation', 'sample_prepartion', etc.)\n \"\"\"\n categories = Series(code_types[code_types[category] == True].index)\n cond = all_codes['dtype'].isin(categories)\n codes = all_codes[cond]\n return codes\n\n def get_tiered_meth_category_offline(self, category):\n path = os.path.join(data_model_dir, '{}_methods.txt'.format(category))\n dfile = open(path)\n json_data = json.load(dfile)\n dfile.close()\n return json_data\n\n def get_meth_codes(self):\n print('-I- Getting cached method codes for 2.5')\n er_methods = self.get_tiered_meth_category_offline('er')\n pmag_methods = self.get_tiered_meth_category_offline('pmag')\n age_methods = self.get_tiered_meth_category_offline('age')\n path = os.path.join(data_model_dir, 'code_types.txt')\n with open(path, 'r') as type_file:\n raw_code_types = json.load(type_file)\n code_types = pd.read_json(raw_code_types)\n path = os.path.join(data_model_dir, 'all_codes.txt')\n with open(path, 'r') as code_file:\n raw_all_codes = json.load(code_file)\n all_codes = pd.read_json(raw_all_codes)\n self.er_methods = er_methods\n self.pmag_methods = pmag_methods\n self.age_methods = age_methods\n self.all_codes = all_codes\n self.code_types = code_types\n\n def get_vocabularies(self):\n print('-I- Getting cached controlled vocabularies for 2.5')\n ## skip trying to get method codes etc. dynamically.\n ## 2.5 method codes etc. are no longer available on earthref\n #all_codes, code_types = self.get_meth_codes()\n #if any(all_codes):\n # er_methods = self.get_tiered_meth_category('er', all_codes, code_types)\n # pmag_methods = self.get_tiered_meth_category('pmag', all_codes, code_types)\n # age_methods = self.get_tiered_meth_category('age', all_codes, code_types)\n #else:\n #\n # method codes\n\n # controlled vocabularies\n path = os.path.join(data_model_dir, 'controlled_vocabularies2.json')\n with open(path, 'r') as code_file:\n raw_vocabularies = json.load(code_file)\n vocabularies = dict([(k, v) for k, v in raw_vocabularies.items()])\n self.vocabularies = vocabularies\n self.possible_vocabularies = vocabularies\n\n def get_all_vocabulary(self):\n self.get_vocabularies()\n self.get_meth_codes()\n\n\nvocab = Vocabulary()\n",
"#!/usr/bin/env python\nfrom __future__ import print_function\nfrom builtins import input\nimport sys\nimport numpy\nimport pmagpy.pmag as pmag\n\ndef main():\n \"\"\"\n NAME\n incfish.py\n\n DESCRIPTION\n calculates fisher parameters from inc only data\n\n INPUT FORMAT\n takes inc data \n\n SYNTAX\n incfish.py [options] [< filename]\n\n OPTIONS\n -h prints help message and quits\n -i for interactive filename entry\n -f FILE, specify input file name\n -F FILE, specify output file name\n < filename for reading from standard input\n \n OUTPUT\n mean inc,Fisher inc, N, R, k, a95\n\n NOTES\n takes the absolute value of inclinations (to take into account reversals),\n but returns gaussian mean if < 50.0, because of polarity ambiguity and \n lack of bias.\n\n \"\"\"\n inc=[]\n if '-h' in sys.argv: # check if help is needed\n print(main.__doc__)\n sys.exit() # graceful quit\n if '-i' in sys.argv: # ask for filename\n file=input(\"Enter file name with inc data: \")\n inc=numpy.loadtxt(file)\n elif '-f' in sys.argv:\n ind=sys.argv.index('-f')\n file=sys.argv[ind+1]\n inc=numpy.loadtxt(file)\n else:\n inc = numpy.loadtxt(sys.stdin,dtype=numpy.float)\n ofile=\"\"\n if '-F' in sys.argv:\n ind = sys.argv.index('-F')\n ofile= sys.argv[ind+1]\n out = open(ofile, 'w + a')\n #\n #get doincfish to do the dirty work:\n fpars= pmag.doincfish(inc)\n outstring='%7.1f %7.1f %i %8.1f %7.1f %7.1f'%(fpars['ginc'],fpars['inc'],fpars['n'],fpars['r'],fpars['k'],fpars['alpha95'])\n if ofile == \"\":\n print(outstring)\n else:\n out.write(outstring+'\\n')\n\nif __name__ == \"__main__\":\n main()\n",
"#!/usr/bin/env python\n\n# -*- python-indent-offset: 4; -*-\n\nimport pandas as pd\nimport numpy as np\nimport sys\nimport os\nimport matplotlib\nif matplotlib.get_backend() != \"TKAgg\":\n matplotlib.use(\"TKAgg\")\n\nimport pmagpy.pmag as pmag\nimport pmagpy.pmagplotlib as pmagplotlib\nimport pmagpy.contribution_builder as cb\nfrom pmagpy import ipmag\nfrom pmag_env import set_env\n\ndef main():\n \"\"\"\n NAME\n zeq_magic.py\n DESCRIPTION\n reads in a MagIC measurements formatted file, makes plots of remanence decay\n during demagnetization experiments. Reads in prior interpretations saved in\n a specimens formatted file interpretations in a specimens file.\n interpretations are saved in the coordinate system used.\n SYNTAX\n zeq_magic.py [command line options]\n OPTIONS\n -h prints help message and quits\n -f MEASFILE: sets measurements format input file, default: measurements.txt\n -fsp SPECFILE: sets specimens format file with prior interpreations, default: specimens.txt\n -fsa SAMPFILE: sets samples format file sample=>site information, default: samples.txt\n -fsi SITEFILE: sets sites format file with site=>location informationprior interpreations, default: samples.txt\n -Fp PLTFILE: sets filename for saved plot, default is name_type.fmt (where type is zijd, eqarea or decay curve)\n -crd [s,g,t]: sets coordinate system, g=geographic, t=tilt adjusted, default: specimen coordinate system\n -spc SPEC plots single specimen SPEC, saves plot with specified format\n with optional -dir settings and quits\n -dir [L,P,F][beg][end]: sets calculation type for principal component analysis, default is none\n beg: starting step for PCA calculation\n end: ending step for PCA calculation\n [L,P,F]: calculation type for line, plane or fisher mean\n must be used with -spc option\n -fmt FMT: set format of saved plot [png,svg,jpg]\n -A: suppresses averaging of replicate measurements, default is to average\n -sav: saves all plots without review\n \"\"\"\n if '-h' in sys.argv:\n print(main.__doc__)\n return\n dir_path = pmag.get_named_arg(\"-WD\", default_val=os.getcwd())\n meas_file = pmag.get_named_arg(\n \"-f\", default_val=\"measurements.txt\")\n spec_file = pmag.get_named_arg(\n \"-fsp\", default_val=\"specimens.txt\")\n specimen = pmag.get_named_arg(\n \"-spc\", default_val=\"\")\n samp_file = pmag.get_named_arg(\"-fsa\", default_val=\"samples.txt\")\n site_file = pmag.get_named_arg(\"-fsi\", default_val=\"sites.txt\")\n plot_file = pmag.get_named_arg(\"-Fp\", default_val=\"\")\n crd = pmag.get_named_arg(\"-crd\", default_val=\"s\")\n fmt = pmag.get_named_arg(\"-fmt\", \"svg\")\n specimen = pmag.get_named_arg(\"-spc\", default_val=\"\")\n interactive = True\n save_plots = False\n if \"-sav\" in sys.argv:\n interactive = False\n save_plots = True\n ipmag.zeq_magic(meas_file, spec_file, crd, dir_path, n_plots=\"all\",\n save_plots=save_plots, fmt=fmt, interactive=interactive, specimen=specimen)\n\n\n#\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.Series",
"pandas.read_json"
],
[
"numpy.loadtxt"
],
[
"matplotlib.get_backend",
"matplotlib.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
halotudio/openPNM-copy2 | [
"d400ec65e9421256a531f6d22a38255b002d5dcb",
"d400ec65e9421256a531f6d22a38255b002d5dcb",
"d400ec65e9421256a531f6d22a38255b002d5dcb",
"d400ec65e9421256a531f6d22a38255b002d5dcb",
"d400ec65e9421256a531f6d22a38255b002d5dcb",
"d400ec65e9421256a531f6d22a38255b002d5dcb",
"d400ec65e9421256a531f6d22a38255b002d5dcb",
"d400ec65e9421256a531f6d22a38255b002d5dcb",
"d400ec65e9421256a531f6d22a38255b002d5dcb"
] | [
"openpnm/io/CSV.py",
"scripts/example_salome_export.py",
"openpnm/io/PerGeos.py",
"tests/unit/models/geometry/ConduitLengthsTest.py",
"tests/unit/core/BaseTest.py",
"tests/unit/models/geometry/PoreSurfaceAreaTest.py",
"tests/unit/models/physics/FlowShapeFactorsTest.py",
"tests/unit/network/GenericNetworkTest.py",
"tests/unit/models/geometry/PoreAreaTest.py"
] | [
"import re\nimport numpy as np\nfrom openpnm.io.Pandas import Pandas\nfrom openpnm.io import GenericIO, Dict\nfrom openpnm.utils import logging, Workspace\nlogger = logging.getLogger(__name__)\nws = Workspace()\n\n\nclass CSV(GenericIO):\n r\"\"\"\n Reads and writes CSV (comma-separated-value files) containing pore and\n throat data\n\n Notes\n -----\n There are a few rules governing how the data is be stored:\n\n 1. The first row of the file (column headers) must contain the\n property names. The subsequent rows contain the data.\n\n 2. The property names should be in the usual OpenPNM format, such as\n of ``pore.volume`` or ``throat.surface_area``.\n\n 3. Each column represents a specific property. For Np x 1 or Nt x 1\n data such as *pore.volume* this is straightforward. For Np x *m* or\n Nt x *m* data, each of the *m* columns should have their own column in\n in the CSV file, with a numpy-style index indicating which axis it\n corresponds to. For instance, the *pore.coords* values should be stored\n as three separate columns with the headings: *pore.coords[0]*,\n *pore.coords[1]*, and *pore.coords[2]*. OpenPNM will convert that back\n into an Np x *m* array upon loading.\n\n 4. The file can contain both or either pore and throat data.\n\n 5. Labels can be imported by placing the characters TRUE and FALSE\n in a column corresponding to the label name (i.e. *pore.front*). TRUE\n indicates where the label applies and FALSE otherwise.\n\n \"\"\"\n\n @classmethod\n def save(cls, *args, **kwargs):\n r\"\"\"\n This method is to be deprecated. Use ``export_data`` instead.\n \"\"\"\n cls.export_data(*args, **kwargs)\n\n @classmethod\n def export_data(cls, network=None, phases=[], filename='', delim=' | '):\n r\"\"\"\n Save all the pore and throat property data on the Network (and\n optionally on any Phases objects) to CSV files.\n\n Parameters\n ----------\n network : OpenPNM Network\n The Network containing the data to be stored\n\n phases : list of OpenPNM Phases (optional)\n The Phases whose data should be stored.\n\n filename : string or path object\n The name of the file to store the data\n\n Notes\n -----\n The data from all Geometry objects is added to the file automatically.\n\n \"\"\"\n project, network, phases = cls._parse_args(network=network,\n phases=phases)\n df = Pandas.to_dataframe(network=network, phases=phases,\n join=True, delim=delim)\n # Write to file\n if filename == '':\n filename = project.name\n fname = cls._parse_filename(filename=filename, ext='csv')\n df.to_csv(fname, index=False)\n\n @classmethod\n def load(cls, *args, **kwargs):\n r\"\"\"\n This method will be deprecated. Use ``import_data`` instead.\n \"\"\"\n proj = cls.import_data(*args, **kwargs)\n return proj\n\n @classmethod\n def import_data(cls, filename, project=None, delim=' | '):\n r\"\"\"\n Opens a 'csv' file, reads in the data, and adds it to the **Network**\n\n Parameters\n ----------\n filename : string (optional)\n The name of the file containing the data to import. 
The formatting\n of this file is outlined below.\n\n project : OpenPNM Project object\n A GenericNetwork is created and added to the specified Project.\n If no Project object is supplied then one will be created and\n returned.\n\n Returns\n -------\n project : list\n An OpenPNM project containing the data assigned to Generic\n versions of the objects from which it was exported.\n\n \"\"\"\n from pandas import read_table\n\n if project is None:\n project = ws.new_project()\n\n fname = cls._parse_filename(filename, ext='csv')\n a = read_table(filepath_or_buffer=fname,\n sep=',',\n skipinitialspace=True,\n index_col=False,\n true_values=['T', 't', 'True', 'true', 'TRUE'],\n false_values=['F', 'f', 'False', 'false', 'FALSE'])\n\n dct = {}\n # First parse through all the items and re-merge columns\n keys = sorted(list(a.keys()))\n for item in keys:\n if item not in a: # column was already merged and popped below\n continue\n m = re.search(r'\[.\]', item) # The dot '.' is a wildcard\n if m: # m is None if pattern not found, otherwise merge cols\n pname = re.split(r'\[.\]', item)[0] # Get base propname\n # Find all other keys with same base propname\n merge_keys = [k for k in a.keys() if k.startswith(pname)]\n # Retrieve and remove arrays with same base propname\n merge_cols = [a.pop(k) for k in merge_keys]\n # Merge arrays into multi-column array and store in DataFrame\n dct[pname] = np.vstack(merge_cols).T\n else:\n dct[item] = np.array(a.pop(item))\n\n project = Dict.from_dict(dct, project=project, delim=delim)\n\n return project\n",
"r\"\"\"\nIn the example script a generic network is created then exported as a\nSalome Python script. The script should be executed from Salome with\n\"load script\". The geometry is then built. The geometry generation on\nSalome may take some time depending on the number of pores.\n\n\"\"\"\nimport numpy as np\nimport openpnm as op\n\n\n# Workspace and project\nws = op.Workspace()\nproj = ws.new_project()\nexport = False\n\n# Network\nnp.random.seed(7)\nnet = op.network.Cubic(shape=[4, 3, 3], spacing=1e-4, project=proj)\n\n# Geometry\ngeo = op.geometry.StickAndBall(network=net, pores=net.Ps, throats=net.Ts)\n\n# Phase\nphase = op.phases.Water(network=net)\n\n# Export the network\nif export:\n proj.export_data(phases=[phase], filename='out', filetype='Salome')\n",
"import numpy\nimport numpy as np\nfrom openpnm.utils import logging\nfrom openpnm.io import GenericIO\nfrom openpnm.network import GenericNetwork\nlogger = logging.getLogger(__name__)\n\n\nclass PerGeos(GenericIO):\n r\"\"\"\n PerGeos is the format used by the Avizo software. See `here for more\n details <https://cases.pergeos.com/>`_.\n \"\"\"\n\n @classmethod\n def save(cls, *args, **kwargs):\n r\"\"\"\n This method is being deprecated. Use ``export_data`` instead.\n \"\"\"\n cls.export_data(*args, **kwargs)\n\n @classmethod\n def export_data(cls, network=None, phases=[], filename=''):\n r\"\"\"\n \"\"\"\n # avoid printing truncated array\n np.set_printoptions(threshold=np.inf)\n\n project, network, phases = cls._parse_args(network=network,\n phases=phases)\n\n # Ensure network has PerGeos' expected properties\n network = network[0]\n if 'pore.EqRadius' not in network.props():\n try:\n network['pore.EqRadius'] = network['pore.diameter']/2\n except KeyError:\n network['pore.EqRadius'] = np.ones([network.Np, ])\n\n # Add phase properties to network, if any\n for phase in phases:\n for item in phase.keys(mode='props', deep=True):\n temp = item.split('.', 1)\n new_name = temp[0] + '.' + phase.name + '.' + temp[1]\n network[new_name] = phase[item]\n\n s = [\"# Avizo 3D ASCII 3.0\\n\\n\"]\n s.append(\"define VERTEX \" + str(network.Np) + '\\n')\n s.append(\"define EDGE \" + str(network.Nt) + '\\n')\n s.append(\"define POINT \" + str(2*network.Nt) + '\\n\\n')\n s.append(\"Parameters {\\n\\tContentType \\\"HxPoreNetworkModel\\\"\\n}\\n\\n\")\n\n types = {'b': 'int', 'i': 'int', 'f': 'float'}\n typemap = {}\n namemap = {}\n shapemap = {}\n propmap = {}\n i = 1\n\n NumEdgePoints = 1\n for item in network.keys():\n typemap[item] = types[str(network[item].dtype)[0]]\n ncols = int(network[item].size/network[item].shape[0])\n if ncols > 1:\n shapemap[item] = '[' + str(ncols) + ']'\n else:\n shapemap[item] = ''\n if item.startswith('pore'):\n element = 'pore', 'VERTEX'\n if item.startswith('throat'):\n element = 'throat', 'EDGE'\n n = item.replace(element[0] + '.', '').replace('.', '_').split('_')\n n = ''.join([i[0].upper()+i[1:] for i in n if len(i)])\n namemap[item] = n\n temp = element[1] + \" { \" + typemap[item] + shapemap[item] + \" \"\\\n + namemap[item] + \" } @\" + str(i) + '\\n'\n\n if temp.find('EdgeConnectivity') == -1:\n # replaces openpnm tags with the mandatory am file's tags\n if \"Conns\" in temp:\n temp = temp.replace(\"Conns\", \"EdgeConnectivity\")\n elif \"Coords\" in temp:\n temp = temp.replace(\"Coords\", \"VertexCoordinates\")\n s.append(temp)\n propmap[item] = str(i)\n if \"NumEdgePoints\" in temp:\n NumEdgePoints = 0\n i += 1\n\n if NumEdgePoints:\n temp = \"EDGE { int NumEdgePoints\" + \" } @\" + str(i) + '\\n'\n s.append(temp)\n tempat = \"@\" + str(i) + '\\n'\n i += 1\n\n # Add POINT data\n s.append(\"POINT { float[3] EdgePointCoordinates } @\" + str(i))\n s.append(\"\\n\\n# Data section follows\")\n for item in network.keys():\n data = network[item]\n if item != 'throat.EdgeConnectivity':\n s.append('\\n\\n@' + propmap[item] + '\\n')\n if shapemap[item] == '':\n data = np.atleast_2d(data).T\n if typemap[item] == 'float':\n formatter = {'float_kind': lambda x: \"%.15E\" % x}\n else:\n formatter = None\n if data.dtype == 'bool':\n data = data.astype(int)\n d = np.array2string(data, formatter=formatter)\n s.append(d.replace('[', '').replace(']', '').replace('\\n ', '\\n'))\n\n # Add POINT data\n s.append('\\n\\n@' + str(i) + '\\n')\n formatter = {'float_kind': lambda x: 
\"%.15E\" % x}\n\n conns = network['throat.conns']\n d = np.array2string(network['pore.coords'][conns], formatter=formatter)\n for r in (('[', ''), (']', ''), ('\\n\\n', '\\n'), ('\\n ', '\\n'),\n ('\\n ', '\\n')):\n d = d.replace(*r)\n d += '\\n'\n s.append(d)\n\n # Add NumEdgePoints\n if NumEdgePoints:\n s.append('\\n\\n' + tempat)\n s.append(''.join(['2' + '\\n']*network.Nt))\n\n # Write to file\n if filename == '':\n filename = project.name\n fname = cls._parse_filename(filename=filename, ext='am')\n with open(fname, 'w') as f:\n f.write(''.join(s))\n\n @classmethod\n def load(cls, *args, **kwargs):\n r\"\"\"\n This method is being deprecated. Use ``import_data`` instead.\n \"\"\"\n return cls.import_data(*args, **kwargs)\n\n @classmethod\n def import_data(cls, filename, network=None):\n r\"\"\"\n \"\"\"\n net = {}\n\n # ---------------------------------------------------------------------\n # Parse the link1 file\n filename = cls._parse_filename(filename=filename, ext='am')\n with open(filename, mode='r') as f:\n Np = None\n Nt = None\n while (Np is None) or (Nt is None):\n s = f.readline()[:-1].split(' ')\n if s[0] == 'define':\n if s[1] == 'VERTEX':\n Np = int(s[2])\n if s[1] == 'EDGE':\n Nt = int(s[2])\n\n net = {}\n propmap = {}\n typemap = {}\n shapemap = {}\n while True:\n s = f.readline()[:-1].split(' ')\n if s[0] == 'VERTEX':\n dshape = [Np]\n if s[2].endswith(']'):\n ncols = int(s[2].split('[', 1)[1].split(']')[0])\n dshape.append(ncols)\n dtype = s[2].split('[')[0]\n temp = np.zeros(dshape, dtype=dtype)\n net['pore.'+s[3]] = temp\n key = int(s[-1].replace('@', ''))\n propmap[key] = 'pore.'+s[3]\n typemap[key] = dtype\n shapemap[key] = dshape\n elif s[0] == 'EDGE':\n dshape = [Nt]\n if s[2].endswith(']'):\n ncols = int(s[2].split('[', 1)[1].split(']')[0])\n dshape.append(ncols)\n dtype = s[2].split('[')[0]\n temp = np.zeros(dshape, dtype=dtype)\n net['throat.'+s[3]] = temp\n key = int(s[-1].replace('@', ''))\n propmap[key] = 'throat.'+s[3]\n typemap[key] = dtype\n shapemap[key] = dshape\n elif s[0] == '#':\n break\n\n s = f.read().split('@')\n for key in propmap.keys():\n if key in s:\n data = s[key].split('\\n')[1:]\n data = ' '.join(data)\n arr = np.fromstring(data, dtype=typemap[key], sep=' ')\n arr = np.reshape(arr, newshape=shapemap[key])\n net[propmap[key]] = arr\n # End file parsing\n\n net['pore.coords'] = net['pore.VertexCoordinates']\n net['throat.conns'] = np.sort(net['throat.EdgeConnectivity'], axis=1)\n\n if network is None:\n network = GenericNetwork()\n network = cls._update_network(network=network, net=net)\n\n return network.project\n",
"import pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport openpnm as op\nimport openpnm.models.geometry.conduit_lengths as mods\n\n\nclass ConduitLengthsTest:\n def setup_class(self):\n self.net = op.network.Cubic(shape=[3, 1, 1], spacing=1.0)\n self.geo = op.geometry.GenericGeometry(\n network=self.net, pores=self.net.Ps, throats=self.net.Ts\n )\n self.air = op.phases.Air(network=self.net)\n self.phys = op.physics.GenericPhysics(\n network=self.net, phase=self.air, geometry=self.geo\n )\n self.geo[\"throat.diameter\"] = 0.4\n self.geo[\"pore.diameter\"] = [0.5, 0.9, 0.7]\n\n def test_spheres_and_cylinders(self):\n L_actual = mods.spheres_and_cylinders(self.geo)\n L_desired = np.array([[0.15 , 0.44688711, 0.40311289],\n [0.40311289, 0.30965898, 0.28722813]])\n assert_allclose(L_actual, L_desired)\n # Incompatible data with model assumptions\n self.geo[\"pore.diameter\"][1] = 0.3\n with pytest.raises(Exception):\n L_actual = mods.spheres_and_cylinders(self.geo)\n self.geo[\"pore.diameter\"][1] = 0.9\n # Overlapping pores\n self.geo[\"pore.diameter\"][0] = 1.2\n L_actual = mods.spheres_and_cylinders(self.geo)\n L_desired = np.array([[5.78750000e-01, 1.00000000e-15, 4.21250000e-01],\n [4.03112887e-01, 3.09658980e-01, 2.87228132e-01]])\n assert_allclose(L_actual, L_desired)\n self.geo[\"pore.diameter\"][0] = 0.5\n\n def test_circles_and_rectangles(self):\n L_actual = mods.circles_and_rectangles(self.geo)\n L_desired = np.array([[0.15 , 0.44688711, 0.40311289],\n [0.40311289, 0.30965898, 0.28722813]])\n assert_allclose(L_actual, L_desired)\n # Incompatible data with model assumptions\n self.geo[\"pore.diameter\"][1] = 0.3\n with pytest.raises(Exception):\n L_actual = mods.circles_and_squares(self.geo)\n self.geo[\"pore.diameter\"][1] = 0.9\n # Overlapping pores\n self.geo[\"pore.diameter\"][0] = 1.2\n L_actual = mods.circles_and_rectangles(self.geo)\n L_desired = np.array([[5.78750000e-01, 1.00000000e-15, 4.21250000e-01],\n [4.03112887e-01, 3.09658980e-01, 2.87228132e-01]])\n assert_allclose(L_actual, L_desired)\n self.geo[\"pore.diameter\"][0] = 0.5\n\n def test_cones_and_cylinders(self):\n L_actual = mods.cones_and_cylinders(self.geo)\n L_desired = np.array([[0.25, 0.3, 0.45],\n [0.45, 0.2, 0.35]])\n assert_allclose(L_actual, L_desired)\n # Overlapping pores\n self.geo[\"pore.diameter\"][0] = 1.2\n L_actual = mods.cones_and_cylinders(self.geo)\n L_desired = np.array([[5.7875e-01, 1.0000e-15, 4.2125e-01],\n [4.5000e-01, 2.0000e-01, 3.5000e-01]])\n assert_allclose(L_actual, L_desired)\n self.geo[\"pore.diameter\"][0] = 0.5\n\n def test_trapezoids_and_rectangles(self):\n L_actual = mods.trapezoids_and_rectangles(self.geo)\n L_desired = np.array([[0.25, 0.3, 0.45],\n [0.45, 0.2, 0.35]])\n assert_allclose(L_actual, L_desired)\n # Overlapping pores\n self.geo[\"pore.diameter\"][0] = 1.2\n L_actual = mods.trapezoids_and_rectangles(self.geo)\n L_desired = np.array([[5.7875e-01, 1.0000e-15, 4.2125e-01],\n [4.5000e-01, 2.0000e-01, 3.5000e-01]])\n assert_allclose(L_actual, L_desired)\n self.geo[\"pore.diameter\"][0] = 0.5\n\n def test_pyramids_and_cuboids(self):\n L_actual = mods.pyramids_and_cuboids(self.geo)\n L_desired = np.array([[0.25, 0.3, 0.45],\n [0.45, 0.2, 0.35]])\n assert_allclose(L_actual, L_desired)\n # Overlapping pores\n self.geo[\"pore.diameter\"][0] = 1.2\n L_actual = mods.pyramids_and_cuboids(self.geo)\n L_desired = np.array([[5.7875e-01, 1.0000e-15, 4.2125e-01],\n [4.5000e-01, 2.0000e-01, 3.5000e-01]])\n assert_allclose(L_actual, L_desired)\n 
self.geo[\"pore.diameter\"][0] = 0.5\n\n def test_cubes_and_cuboids(self):\n L_actual = mods.cubes_and_cuboids(self.geo)\n L_desired = np.array([[0.25, 0.3, 0.45],\n [0.45, 0.2, 0.35]])\n assert_allclose(L_actual, L_desired)\n # Overlapping pores\n self.geo[\"pore.diameter\"][0] = 1.2\n L_actual = mods.cubes_and_cuboids(self.geo)\n L_desired = np.array([[6.0e-01, 1.0e-15, 4.0e-01],\n [4.5e-01, 2.0e-01, 3.5e-01]])\n assert_allclose(L_actual, L_desired)\n self.geo[\"pore.diameter\"][0] = 0.5\n\n def test_squares_and_rectangles(self):\n L_actual = mods.squares_and_rectangles(self.geo)\n L_desired = np.array([[0.25, 0.3, 0.45],\n [0.45, 0.2, 0.35]])\n assert_allclose(L_actual, L_desired)\n # Overlapping pores\n self.geo[\"pore.diameter\"][0] = 1.2\n L_actual = mods.cubes_and_cuboids(self.geo)\n L_desired = np.array([[6.0e-01, 1.0e-15, 4.0e-01],\n [4.5e-01, 2.0e-01, 3.5e-01]])\n assert_allclose(L_actual, L_desired)\n self.geo[\"pore.diameter\"][0] = 0.5\n\n\nif __name__ == '__main__':\n\n t = ConduitLengthsTest()\n self = t\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print(f'Running test: {item}')\n t.__getattribute__(item)()\n",
"import pytest\nimport numpy as np\nimport openpnm as op\n\n\nclass BaseTest:\n\n def setup_class(self):\n ws = op.Workspace()\n ws.settings['local_data'] = True\n self.net = op.network.Cubic(shape=[3, 3, 3])\n self.geo = op.geometry.GenericGeometry(network=self.net,\n pores=self.net.Ps,\n throats=self.net.Ts)\n self.geo['pore.diameter'] = np.random.rand(self.net.Np)\n self.geo.add_model(propname='pore.volume',\n model=op.models.geometry.pore_volume.sphere)\n self.geo['throat.diameter'] = np.random.rand(self.net.Nt)\n self.geo.add_model(propname='throat.area',\n model=op.models.geometry.throat_cross_sectional_area.cylinder)\n self.geo.regenerate_models()\n self.geo['throat.label1'] = False\n self.geo['throat.label2'] = False\n self.geo['throat.label1'][0:6] = True\n self.geo['throat.label2'][3:9] = True\n self.net1 = op.network.Cubic(shape=[3, 3, 3])\n self.geo1 = op.geometry.GenericGeometry(network=self.net1,\n pores=self.net1.Ps,\n throats=self.net1.Ts)\n self.phase1 = op.phases.GenericPhase(network=self.net1)\n self.phase2 = op.phases.GenericPhase(network=self.net1)\n self.phys1 = op.physics.GenericPhysics(network=self.net1,\n geometry=self.geo1,\n phase=self.phase1)\n self.phys2 = op.physics.GenericPhysics(network=self.net1,\n geometry=self.geo1,\n phase=self.phase2)\n self.net2 = op.network.Cubic(shape=[3, 3, 3])\n Ps = np.arange(0, 18)\n Ts = self.net2.find_neighbor_pores(Ps, mode='or')\n self.geo21 = op.geometry.GenericGeometry(network=self.net2,\n pores=Ps,\n throats=Ts)\n Ps = np.arange(18, 27)\n Ts = self.net2.find_neighbor_pores(Ps, mode='xnor')\n self.geo22 = op.geometry.GenericGeometry(network=self.net2,\n pores=Ps,\n throats=Ts)\n\n def teardown_class(self):\n ws = op.Workspace()\n ws.clear()\n\n def test_clear_model_data(self):\n pn = op.network.Cubic([5, 5, 5])\n phase = op.phases.Water(network=pn)\n a = len(phase)\n phase.clear(mode='model_data')\n assert len(phase) == (a - len(phase.models))\n # Clear non existing data\n phase.clear(mode='model_data')\n assert len(phase) == (a - len(phase.models))\n\n def test_clear_model_data_when_model_returns_dictionary(self):\n pn = op.network.Cubic([5, 5, 5])\n geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)\n geo.clear(mode='model_data')\n\n def test_pores(self):\n a = self.net.pores()\n assert np.all(a == np.arange(0, self.net.Np))\n\n def test_pores_one_label(self):\n a = self.net.pores(labels='top')\n assert np.all(a == [2, 5, 8, 11, 14, 17, 20, 23, 26])\n\n def test_pores_two_labels_or(self):\n a = self.net.pores(labels=['top', 'left'], mode='or')\n assert np.all(a == [0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 14, 17, 20, 23, 26])\n\n def test_pores_two_labels_xnor(self):\n a = self.net.pores(labels=['top', 'left'], mode='xnor')\n assert np.all(a == [2, 5, 8])\n\n def test_pores_two_labels_not_xor(self):\n a = self.net.pores(labels=['top', 'left'], mode='xor')\n assert np.all(a == [0, 1, 3, 4, 6, 7, 11, 14, 17, 20, 23, 26])\n\n def test_pores_two_labels_nor(self):\n a = self.net.pores(labels=['top', 'left'], mode='nor')\n assert np.all(a == [9, 10, 12, 13, 15, 16, 18, 19, 21, 22, 24, 25])\n b = self.net.pores(labels=['top', 'left'], mode='or')\n c = self.net.tomask(pores=a)*self.net.tomask(pores=b)\n assert c.sum() == 0\n\n def test_pores_two_labels_nand(self):\n a = self.net.pores(labels=['top', 'left'], mode='nand')\n assert np.all(a == [0, 1, 3, 4, 6, 7, 11, 14, 17, 20, 23, 26])\n\n def test_pores_bad_mode(self):\n with pytest.raises(Exception):\n self.net.pores(labels=['top', 'front'], mode='bob')\n\n def 
test_pores_empty_list(self):\n a = self.net.pores(labels=[], mode='or')\n assert a.size == 0\n\n def test_pores_asmask(self):\n a = self.net.pores(labels=['top', 'front'], mode='or', asmask=True)\n assert a.sum() == 15\n b = self.net.pores(labels=['top', 'front'], mode='or')\n assert np.all(np.where(a)[0] == b)\n\n def test_pores_with_target(self):\n net = op.network.Cubic(shape=[2, 2, 2])\n geo1 = op.geometry.GenericGeometry(network=net,\n pores=[1, 3, 5, 7],\n throats=range(6))\n geo2 = op.geometry.GenericGeometry(network=net,\n pores=[0, 2, 4, 6],\n throats=range(6, 12))\n assert np.all(net.pores('top', target=geo1) == [0, 1, 2, 3])\n assert len(net.pores('top', target=geo2)) == 0\n mapped = net.map_pores(pores=[0, 1, 2, 3], origin=geo1)\n assert np.all(mapped == net.pores('geo_01'))\n mapped = net.map_pores(pores=[0, 1, 2, 3], origin=geo2)\n assert np.all(mapped == net.pores('geo_02'))\n\n def test_throats_with_target(self):\n net = op.network.Cubic(shape=[2, 2, 2])\n geo1 = op.geometry.GenericGeometry(network=net,\n pores=[1, 3, 5, 7],\n throats=range(6))\n geo2 = op.geometry.GenericGeometry(network=net,\n pores=[0, 2, 4, 6],\n throats=range(6, 12))\n assert np.all(net.throats('surface', target=geo1) == [0, 1, 2, 3, 4, 5])\n assert np.all(net.throats('surface', target=geo2) == [0, 1, 2, 3, 4, 5])\n mapped = net.map_throats(throats=[0, 1, 2, 3, 4, 5], origin=geo1)\n assert np.all(mapped == net.throats('geo_01'))\n mapped = net.map_throats(throats=[0, 1, 2, 3, 4, 5], origin=geo2)\n assert np.all(mapped == net.throats('geo_02'))\n\n def test_throats(self):\n a = self.net.throats()\n assert np.all(a == np.arange(0, self.net.Nt))\n\n def test_throats_asmask(self):\n a = self.net.throats(labels=['internal'], mode='or', asmask=True)\n assert a.sum() == 54\n b = self.net.throats(labels=['internal'], mode='or')\n assert np.all(np.where(a)[0] == b)\n\n def test_throats_one_label(self):\n a = self.net.throats(labels='label1')\n assert np.all(a == [0, 1, 2, 3, 4, 5])\n\n def test_throats_two_labels_or(self):\n a = self.net.throats(labels=['label1', 'label2'], mode='or')\n assert np.all(a == [0, 1, 2, 3, 4, 5, 6, 7, 8])\n\n def test_throats_two_labels_xnor(self):\n a = self.net.throats(labels=['label1', 'label2'], mode='xnor')\n assert np.all(a == [3, 4, 5])\n\n def test_throats_two_labels_xor(self):\n a = self.net.throats(labels=['label1', 'label2'], mode='xor')\n assert np.all(a == [0, 1, 2, 6, 7, 8])\n\n def test_filter_by_label_pores_no_label(self):\n Ps = self.net.pores(['top', 'bottom', 'front'])\n with pytest.raises(Exception):\n self.net.filter_by_label(pores=Ps)\n\n def test_filter_by_label_pores_one_label_as_string(self):\n Ps = self.net.pores(['top', 'bottom', 'front'])\n a = self.net.filter_by_label(pores=Ps, labels='top', mode='or')\n b = [2, 5, 8, 11, 14, 17, 20, 23, 26]\n assert np.all(a == b)\n\n def test_filter_by_label_pores_one_label_as_list(self):\n Ps = self.net.pores(['top', 'bottom', 'front'])\n a = self.net.filter_by_label(pores=Ps, labels=['top'])\n b = [2, 5, 8, 11, 14, 17, 20, 23, 26]\n assert np.all(a == b)\n\n def test_filter_by_label_pores_two_labels_or(self):\n Ps = self.net.pores(['top', 'bottom', 'front'])\n a = self.net.filter_by_label(pores=Ps, labels=['top', 'bottom'],\n mode='or')\n b = [0, 2, 3, 5, 6, 8, 9, 11, 12, 14, 15, 17, 18, 20, 21, 23, 24, 26]\n assert np.all(a == b)\n\n def test_filter_by_label_pores_two_labels_xnor(self):\n Ps = self.net.pores(['top', 'bottom', 'left'])\n a = self.net.filter_by_label(pores=Ps, labels=['top', 'left'],\n mode='xnor')\n 
b = [2, 5, 8]\n assert np.all(a == b)\n\n def test_filter_by_label_pores_two_labels_xnor_empty(self):\n Ps = self.net.pores(['top', 'bottom', 'left'])\n a = self.net.filter_by_label(pores=Ps, labels=['top', 'bottom'],\n mode='xnor')\n b = []\n assert np.all(a == b)\n\n def test_filter_by_label_pores_two_labels_xor(self):\n Ps = self.net.pores(['top', 'bottom', 'left'])\n a = self.net.filter_by_label(pores=Ps, labels=['top', 'left'],\n mode='xor')\n b = [0, 1, 3, 4, 6, 7, 11, 14, 17, 20, 23, 26]\n assert np.all(a == b)\n\n def test_filter_by_label_pores_two_labels_nor(self):\n Ps = self.net.pores(['top', 'bottom', 'left'])\n a = self.net.filter_by_label(pores=Ps, labels=['top', 'left'],\n mode='nor')\n b = [9, 12, 15, 18, 21, 24]\n assert np.all(a == b)\n\n def test_filter_by_label_empty_locations(self):\n a = self.net.filter_by_label(pores=[], labels='top')\n assert np.size(a) == 0\n\n def test_filter_by_label_pores_and_throats(self):\n with pytest.raises(Exception):\n self.net.filter_by_label(pores=[0, 1, 2], throats=[0, 1, 2])\n\n def test_tomask_pores(self):\n a = self.net.tomask(pores=self.net.pores('top'))\n assert np.sum(a) == 9\n\n def test_tomask_throats(self):\n a = self.net.tomask(throats=self.net.throats('label1'))\n assert np.sum(a) == 6\n\n def test_tomask_pores_and_throats(self):\n with pytest.raises(Exception):\n _ = self.net.tomask(throats=[0, 1, 2], pores=[0, 1, 2])\n\n def test_toindices_pores(self):\n mask = np.zeros((self.net.Np), dtype=bool)\n Ps = [0, 3, 6]\n mask[Ps] = True\n a = self.net.toindices(mask)\n assert np.all(a == Ps)\n\n def test_toindices_throats(self):\n mask = np.zeros((self.net.Nt), dtype=bool)\n Ts = [0, 3, 6]\n mask[Ts] = True\n a = self.net.toindices(mask)\n assert np.all(a == Ts)\n\n def test_toindices_float_mask(self):\n mask = (np.random.rand(self.net.Np) < 0.5)\n inds_in = np.where(mask)[0]\n inds_out = self.net.toindices(mask*1.0)\n assert np.all(inds_in == inds_out)\n\n def test_toindices_invalid_mask(self):\n mask = self.net.Np\n with pytest.raises(Exception):\n self.net.toindices(mask)\n\n def test_toindices_wrong_mask(self):\n mask = np.zeros((self.net.Nt)-2, dtype=bool)\n mask[[0, 3, 6]] = True\n with pytest.raises(Exception):\n self.net.toindices(mask)\n\n def test_count(self):\n with pytest.raises(Exception):\n self.net._count()\n\n def test_num_pores(self):\n a = self.net.num_pores()\n assert a == 27\n\n def test_num_pores_one_label(self):\n a = self.net.num_pores(labels='top')\n assert a == 9\n\n def test_num_pores_two_labels_or(self):\n a = self.net.num_pores(labels=['top', 'front'], mode='or')\n assert a == 15\n\n def test_num_pores_two_labels_xnor(self):\n a = self.net.num_pores(labels=['top', 'front'], mode='xnor')\n assert a == 3\n\n def test_num_pores_two_labels_xor(self):\n a = self.net.num_pores(labels=['top', 'front'],\n mode='xor')\n assert a == 12\n\n def test_num_pores_two_labels_nor(self):\n a = self.net.num_pores(labels=['top', 'front'], mode='nor')\n assert a == 12\n\n def test_num_throats(self):\n a = self.net.num_throats()\n assert a == 54\n\n def test_num_throats_one_label(self):\n a = self.net.num_throats(labels='label1')\n assert a == 6\n\n def test_num_throats_two_labels_or(self):\n a = self.net.num_throats(labels=['label1', 'label2'], mode='or')\n assert a == 9\n\n def test_num_throats_two_labels_xnor(self):\n a = self.net.num_throats(labels=['label1', 'label2'],\n mode='xnor')\n assert a == 3\n\n def test_num_throats_two_labels_xor(self):\n a = self.net.num_throats(labels=['label1', 'label2'],\n mode='xor')\n 
assert a == 6\n\n def test_num_throats_two_labels_nor(self):\n a = self.net.num_throats(labels=['label1', 'label2'],\n mode='nor')\n assert a == 45\n\n def test_keys_mode_skip(self):\n a = self.net.keys()\n assert 'dict_keys' in str(type(a))\n\n def test_keys_mode_props(self):\n a = self.net.keys(mode='props')\n assert 'dict_keys' not in str(type(a))\n b = [i for i in a if self.net[i].dtype != bool]\n assert a == b\n\n def test_keys_mode_labels(self):\n a = self.net.keys(mode='labels')\n assert 'dict_keys' not in str(type(a))\n b = [i for i in a if self.net[i].dtype == bool]\n assert a == b\n\n def test_keys_element_pores_mode_all(self):\n a = self.net.keys(element='pores', mode='all')\n b = [i.split('.')[0] for i in a]\n assert set(b) == {'pore'}\n\n def test_keys_element_throats_mode_all(self):\n a = self.net.keys(element='throats', mode='all')\n b = [i.split('.')[0] for i in a]\n assert set(b) == {'throat'}\n\n def test_keys_mode_props_and_labels(self):\n a = self.net.keys(mode=['props', 'labels'])\n b = list(self.net.keys())\n assert set(a) == set(b)\n\n def test_props_all(self):\n a = self.geo.props()\n assert sorted(a) == ['pore.diameter', 'pore.volume',\n 'throat.area', 'throat.diameter']\n\n def test_props_models(self):\n a = self.geo.props(mode='models')\n b = ['pore.volume', 'throat.area']\n assert sorted(a) == sorted(b)\n\n def test_props_constants(self):\n a = self.geo.props(mode='constants')\n b = ['pore.diameter', 'throat.diameter']\n assert sorted(a) == sorted(b)\n\n def test_props_pores_all(self):\n a = self.geo.props(element='pores')\n b = ['pore.diameter', 'pore.volume']\n assert sorted(a) == sorted(b)\n\n def test_props_pores_models(self):\n a = self.geo.props(element='pores', mode='models')\n b = ['pore.volume']\n assert sorted(a) == sorted(b)\n\n def test_props_pores_constants(self):\n a = self.geo.props(element='pores', mode='constants')\n b = ['pore.diameter']\n assert sorted(a) == sorted(b)\n\n def test_props_hidden_keys(self):\n self.net['pore._blah'] = 1.0\n assert 'pore._blah' not in self.net.__str__()\n assert 'pore._blah' not in self.net.props()\n assert 'pore._blah' in self.net.keys()\n\n def test_labels(self):\n a = self.net.labels()\n assert 'pore.top' in a\n\n def test_labels_on_pores(self):\n a = self.net.labels(element='pores')\n b = ['pore.all', 'pore.back', 'pore.bottom', 'pore.front',\n 'pore.internal', 'pore.left', 'pore.right', 'pore.'+self.geo.name,\n 'pore.top', 'pore.surface']\n assert sorted(a) == sorted(b)\n\n def test_labels_on_throats(self):\n a = self.net.labels(element='throats')\n b = ['throat.all', 'throat.internal', 'throat.surface',\n 'throat.'+self.geo.name]\n assert sorted(a) == sorted(b)\n\n def test_labels_on_pores_and_throats(self):\n with pytest.raises(Exception):\n self.net.labels(pores=[0, 1], throats=[0, 1])\n\n def test_labels_on_foo(self):\n with pytest.raises(Exception):\n self.net.labels(element='foo')\n\n def test_labels_on_all_pores(self):\n a = self.net.labels(pores=self.net.Ps)\n b = ['pore.all', 'pore.back', 'pore.bottom', 'pore.front',\n 'pore.internal', 'pore.left', 'pore.right', 'pore.'+self.geo.name,\n 'pore.top', 'pore.surface']\n assert sorted(a) == sorted(b)\n\n def test_labels_on_all_throats(self):\n a = self.net.labels(throats=self.net.Ts)\n b = ['throat.all', 'throat.internal', 'throat.surface',\n 'throat.'+self.geo.name]\n assert sorted(a) == sorted(b)\n\n def test_labels_on_one_pore(self):\n a = self.net.labels(pores=0)\n b = ['pore.all', 'pore.bottom', 'pore.front',\n 'pore.internal', 'pore.surface',\n 
'pore.left', 'pore.'+self.geo.name]\n assert sorted(a) == sorted(b)\n\n def test_labels_on_list_of_pores(self):\n a = self.net.labels(pores=[0, 1])\n b = ['pore.all', 'pore.bottom', 'pore.front',\n 'pore.internal', 'pore.surface',\n 'pore.left', 'pore.'+self.geo.name]\n assert sorted(a) == sorted(b)\n\n def test_labels_locations_boolean(self):\n ind = np.zeros((self.net.Np), dtype=bool)\n ind[[0, 1]] = True\n a = self.net.labels(pores=ind)\n b = ['pore.all', 'pore.bottom', 'pore.front',\n 'pore.internal', 'pore.surface',\n 'pore.left', 'pore.'+self.geo.name]\n assert sorted(a) == sorted(b)\n\n def test_labels_pores_mode_or(self):\n a = self.net.labels(pores=[0, 1, 2], mode='or')\n b = ['pore.all', 'pore.bottom', 'pore.front',\n 'pore.internal', 'pore.surface',\n 'pore.left', 'pore.'+self.geo.name, 'pore.top']\n assert sorted(a) == sorted(b)\n\n def test_labels_pores_mode_and(self):\n a = self.net.labels(pores=[0, 1, 2], mode='and')\n b = ['pore.all', 'pore.front', 'pore.geo_01',\n 'pore.internal', 'pore.left', 'pore.surface']\n assert sorted(a) == sorted(b)\n\n def test_labels_pores_mode_xor(self):\n a = self.net.labels(pores=[0, 1, 2], mode='xor')\n b = ['pore.bottom', 'pore.top']\n assert sorted(a) == sorted(b)\n\n def test_labels_pores_mode_nand(self):\n a = self.net.labels(pores=[0, 1, 2], mode='nand')\n b = []\n assert sorted(a) == sorted(b)\n\n def test_labels_pores_mode_xnor(self):\n a = self.net.labels(pores=[0, 1, 2], mode='xnor')\n b = ['pore.all', 'pore.front', 'pore.internal',\n 'pore.surface', 'pore.left', 'pore.'+self.geo.name]\n assert sorted(a) == sorted(b)\n\n def test_labels_pores_mode_nor(self):\n a = self.net.labels(pores=[0, 1, 2], mode='nor')\n b = ['pore.back', 'pore.right']\n assert sorted(a) == sorted(b)\n\n def test_labels_pores_mode_foo(self):\n with pytest.raises(Exception):\n self.net.labels(pores=[0, 1], mode='foo')\n\n def test_labels_hidden_key(self):\n self.net['pore._foo'] = True\n assert 'pore._foo' not in self.net.__str__()\n assert 'pore._foo' in self.net.keys()\n\n def test_parse_indices_boolean(self):\n b = np.array([True, True, True])\n with pytest.raises(Exception):\n self.net._parse_indices(indices=b)\n b = np.zeros((self.net.Np,), dtype=bool)\n assert len(self.net._parse_indices(indices=b)) == 0\n b = np.zeros((self.net.Nt,), dtype=bool)\n b[[0, 1, 2]] = True\n assert np.shape(self.net._parse_indices(indices=b)) == (3,)\n\n def test_parse_indices_None(self):\n assert len(self.net._parse_indices(indices=None)) == 0\n\n def test_parse_indices_int(self):\n a = self.net._parse_indices(indices=0)\n assert isinstance(a, np.ndarray)\n assert np.all(a == 0)\n\n def test_parse_indices_list(self):\n a = self.net._parse_indices(indices=[0, 1])\n assert isinstance(a, np.ndarray)\n assert np.all(a == [0, 1])\n\n def test_parse_element_None(self):\n a = self.net._parse_element(element=None)\n assert sorted(a) == ['pore', 'throat']\n\n def test_parse_element_various_strings(self):\n a = self.net._parse_element(element='pore')\n assert a == ['pore']\n a = self.net._parse_element(element='Pore')\n assert a == ['pore']\n a = self.net._parse_element(element='pores')\n assert a == ['pore']\n a = self.net._parse_element(element='Pores')\n assert a == ['pore']\n a = self.net._parse_element(element='throat')\n assert a == ['throat']\n a = self.net._parse_element(element='Throat')\n assert a == ['throat']\n a = self.net._parse_element(element='throats')\n assert a == ['throat']\n a = self.net._parse_element(element='Throats')\n assert a == ['throat']\n\n def 
test_parse_element_bad_string(self):\n with pytest.raises(Exception):\n self.net._parse_element(element='pore2')\n\n def test_parse_element_duplicate(self):\n a = self.net._parse_element(element=['pore', 'pore'])\n assert a == ['pore']\n a = self.net._parse_element(element=['pore', 'pore'], single=True)\n assert a == 'pore'\n\n def test_parse_element_single_true(self):\n with pytest.raises(Exception):\n self.net._parse_element(element=['pore', 'throat'], single=True)\n a = self.net._parse_element(element=['pore'], single=True)\n assert a == 'pore'\n\n def test_parse_element_props(self):\n a = self.net._parse_element(element=['pore.diameter'], single=True)\n assert a == 'pore'\n\n def test_parse_labels_none(self):\n with pytest.raises(Exception):\n self.net._parse_labels(labels=None, element='pore')\n\n def test_parse_labels_string(self):\n a = self.net._parse_labels(labels='top', element='pore')\n assert a == ['pore.top']\n a = self.net._parse_labels(labels='internal', element='throat')\n assert a == ['throat.internal']\n a = self.net._parse_labels(labels='pore.top', element='pore')\n assert a == ['pore.top']\n a = self.net._parse_labels(labels='throat.internal', element='throat')\n assert a == ['throat.internal']\n\n def test_parse_labels_wildcards(self):\n a = self.net._parse_labels(labels='pore.b*', element='pore')\n assert sorted(a) == ['pore.back', 'pore.bottom']\n a = self.net._parse_labels(labels='pore.*ight', element='pore')\n assert sorted(a) == ['pore.right']\n\n def test_parse_labels_duplicates(self):\n a = self.net._parse_labels(['pore.r*', 'pore.right'], element='pore')\n assert a == ['pore.right']\n\n def test_parse_mode_string(self):\n a = self.net._parse_mode(mode='or')\n assert a == ['or']\n\n def test_parse_mode_single(self):\n a = self.net._parse_mode(mode=['or', 'xnor'])\n assert sorted(a) == ['or', 'xnor']\n with pytest.raises(Exception):\n a = self.net._parse_mode(mode=['or1', 'or2'], single=True)\n a = self.net._parse_mode(mode=['or1'], single=True)\n assert a == 'or1'\n\n def test_parse_mode_allowed(self):\n allowed = ['a', 'b', 'c']\n with pytest.raises(Exception):\n self.net._parse_mode(mode=['a', 'd'], allowed=allowed)\n\n def test_parse_mode_duplicate(self):\n a = self.net._parse_mode(mode=['or', 'or'])\n assert a == ['or']\n a = self.net._parse_mode(mode=['or', 'or'], single=True)\n assert a == 'or'\n\n def test_setitem_wrong_prefix(self):\n with pytest.raises(Exception):\n self.geo['pore2.test'] = 0\n\n def test_setitem_wrong_length(self):\n with pytest.raises(Exception):\n self.geo['pore.test'] = np.ones((self.geo.Np+1))\n assert 'pore.test' not in self.geo.keys()\n\n def test_setitem_replace_all(self):\n array_len = np.size(self.geo['pore.all'])\n self.geo['pore.all'] = np.ones((self.geo.Np+1))\n assert np.size(self.geo['pore.all']) == array_len\n\n def test_setitem_overwrite_into_all(self):\n pass\n # This test will fail as there is currently no way to prevent this\n # array_sum = np.sum(self.geo['pore.all'])\n # self.geo['pore.all'][0] = False\n # assert np.sum(self.geo['pore.all']) == array_sum\n\n def test_setitem_subdict_conflicts(self):\n self.geo['pore.foo'] = 1\n with pytest.raises(Exception):\n self.geo['pore.foo.bar'] = 1\n self.geo['throat.foo.bar'] = 1\n with pytest.raises(Exception):\n self.geo['throat.foo'] = 1\n\n def test_object_name_name_conflict(self):\n with pytest.raises(Exception):\n self.geo.name = self.net.name\n\n def test_object_name_array_conflict(self):\n with pytest.raises(Exception):\n self.geo.name = 'coords'\n Np = 
self.geo.Np\n Nt = self.geo.Nt\n assert self.geo.Np == Np\n assert self.geo.Nt == Nt\n\n def test_get_indices(self):\n temp = self.net.pop('pore.all')\n with pytest.raises(Exception):\n self.net._get_indices(element='pores', labels='blah')\n self.net.update({'pore.all': temp})\n\n def test_get_indices_wildcard(self):\n a = self.net._get_indices(element='pore', labels='ba*')\n assert np.all(a == [6, 7, 8, 15, 16, 17, 24, 25, 26])\n b = self.net._get_indices(element='pore', labels='*ck')\n assert np.all(a == b)\n\n def test_write_dict(self):\n self.net['pore.test_dict'] = {'test1': 1, 'test2': self.net.Ps}\n assert 'pore.test_dict.test1' in self.net.keys()\n assert self.net['pore.test_dict.test1'].shape == (self.net.Np, )\n assert 'pore.test_dict.test2' in self.net.keys()\n\n def test_map_pores(self):\n a = self.geo21['pore._id']\n b = self.geo22['pore._id']\n assert a.size == self.geo21.Np\n assert b.size == self.geo22.Np\n assert ~np.any(np.in1d(a, b))\n Pgeo21 = self.net2.map_pores(pores=self.geo21.Ps, origin=self.geo21)\n assert np.all(Pgeo21 == self.net2.pores(self.geo21.name))\n Pgeo22 = self.net2.map_pores(pores=self.geo22.Ps, origin=self.geo22)\n assert np.all(Pgeo22 == self.net2.pores(self.geo22.name))\n\n def test_map_throats(self):\n a = self.geo21['throat._id']\n assert a.size == self.geo21.Nt\n Tgeo21 = self.net2.map_throats(throats=self.geo21.Ts,\n origin=self.geo21)\n assert np.all(Tgeo21 == self.net2.throats(self.geo21.name))\n\n def test_map_pores_unfiltered(self):\n b = self.net.map_pores(pores=self.geo.Ps, origin=self.geo,\n filtered=False)\n assert np.all(b.indices == self.net.pores(self.geo.name))\n assert b.mask.size == self.geo.Np\n\n def test_map_pores_unfiltered_missing(self):\n Ps = self.net2.Ps[15:20]\n b = self.geo22.map_pores(pores=Ps, origin=self.net2, filtered=False)\n assert sum(b.mask) == 2\n assert len(b.mask) == 5\n\n def test_map_pores_reverse(self):\n Ps = self.net2.Ps[:5]\n b = self.geo21.map_pores(pores=Ps, origin=self.net2)\n assert np.all(b == [0, 1, 2, 3, 4])\n Ps = self.net2.Ps[-5:]\n b = self.geo22.map_pores(pores=Ps, origin=self.net2)\n assert np.all(b == [4, 5, 6, 7, 8])\n\n def test_map_pores_missing(self):\n Ps = self.net2.Ps[:5]\n b = self.geo22.map_pores(pores=Ps, origin=self.net2)\n assert len(b) == 0\n\n def test_getitem_with_no_matches(self):\n self.geo.pop('pore.blah', None)\n with pytest.raises(KeyError):\n _ = self.geo['pore.blah']\n\n def test_interpolate_data(self):\n self.geo['throat.tester'] = np.linspace(0, 1.0, self.geo.network.Nt)\n self.geo['pore.tester'] = np.linspace(0, 1.0, self.geo.network.Np)\n a = self.geo.interpolate_data(propname='throat.tester')\n assert a.size == self.geo.Np\n assert np.isclose(a.mean(), 0.5)\n a = self.geo.interpolate_data(propname='pore.tester')\n assert a.size == self.geo.Nt\n assert np.isclose(a.mean(), 0.5)\n\n def test_get_no_matches(self):\n self.geo.pop('pore.blah', None)\n with pytest.raises(KeyError):\n _ = self.geo['pore.blah']\n\n def test_get_string(self):\n a = self.net.get('pore.coords')\n assert a.shape == (self.net.Np, 3)\n\n # def test_interleave_data_with_unyts_on_all(self):\n # import unyt\n # pn = op.network.Cubic(shape=[10, 1, 1])\n # geo1 = op.geometry.GenericGeometry(network=pn, pores=[0, 1, 2, 3, 4])\n # geo2 = op.geometry.GenericGeometry(network=pn, pores=[5, 6, 7, 8, 9])\n # geo1['pore.test'] = np.random.rand(geo1.Np, ) * unyt.m\n # geo2['pore.test'] = np.random.rand(geo2.Np, ) * unyt.m\n # assert hasattr(pn['pore.test'], 'units')\n\n # def 
test_interleave_data_with_unyts_on_only_one(self):\n # import unyt\n # pn = op.network.Cubic(shape=[10, 1, 1])\n # geo1 = op.geometry.GenericGeometry(network=pn, pores=[0, 1, 2, 3, 4])\n # geo2 = op.geometry.GenericGeometry(network=pn, pores=[5, 6, 7, 8, 9])\n # geo1['pore.test'] = np.random.rand(geo1.Np, )\n # geo2['pore.test'] = np.random.rand(geo2.Np, ) * unyt.m\n # assert hasattr(pn['pore.test'], 'units')\n\n # def test_interpolate_date_with_unyts(self):\n # import unyt\n # pn = op.network.Cubic(shape=[10, 1, 1])\n # geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps)\n # geo['pore.test'] = np.random.rand(geo.Np, ) * unyt.m\n # a = geo.interpolate_data('pore.test')\n # assert hasattr(a, 'units')\n\n # def test_interpolate_date_with_unyts_but_none_assigned(self):\n # pn = op.network.Cubic(shape=[10, 1, 1])\n # geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps)\n # geo['pore.test'] = np.random.rand(geo.Np, )\n # b = geo.interpolate_data('pore.test')\n # assert not hasattr(b, 'units')\n\n def test_subdict_getitem_on_network_from_network(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n pn['pore.foo.bar'] = 1\n pn['pore.foo.baz'] = 2\n d = pn['pore.foo']\n assert len(d) == 2\n assert 'pore.foo.bar' in d.keys()\n assert 'pore.foo.baz' in d.keys()\n\n def test_subdict_getitem_on_network_from_one_geometry(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n pn['pore.foo.bar'] = 1\n pn['pore.foo.baz'] = 2\n geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps,\n throats=pn.Ts)\n d = geo['pore.foo']\n assert len(d) == 2\n assert 'pore.foo.bar' in d.keys()\n assert 'pore.foo.baz' in d.keys()\n\n def test_subdict_getitem_on_network_from_two_geometries(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n pn['pore.foo.bar'] = 1\n pn['pore.foo.baz'] = 2\n geo1 = op.geometry.GenericGeometry(network=pn,\n pores=pn.Ps[:75],\n throats=pn.Ts[:75])\n geo2 = op.geometry.GenericGeometry(network=pn,\n pores=pn.Ps[75:],\n throats=pn.Ts[75:])\n d = geo1['pore.foo']\n assert len(d) == 2\n assert 'pore.foo.bar' in d.keys()\n assert 'pore.foo.baz' in d.keys()\n assert d['pore.foo.bar'].size == geo1.Np\n assert d['pore.foo.baz'].size == geo1.Np\n d = geo2['pore.foo']\n assert len(d) == 2\n assert 'pore.foo.bar' in d.keys()\n assert 'pore.foo.baz' in d.keys()\n assert d['pore.foo.bar'].size == geo2.Np\n assert d['pore.foo.baz'].size == geo2.Np\n\n def test_subdict_getitem_on_phase_from_phase(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n air = op.phases.GenericPhase(network=pn)\n air['pore.foo.bar'] = 1\n air['pore.foo.baz'] = 2\n d = air['pore.foo']\n assert len(d) == 2\n assert 'pore.foo.bar' in d.keys()\n assert 'pore.foo.baz' in d.keys()\n\n def test_subdict_getitem_on_phase_from_one_physics(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps,\n throats=pn.Ts)\n air = op.phases.GenericPhase(network=pn)\n phys = op.physics.GenericPhysics(network=pn, phase=air, geometry=geo)\n air['pore.foo.bar'] = 1\n air['pore.foo.baz'] = 2\n d = phys['pore.foo']\n assert len(d) == 2\n assert 'pore.foo.bar' in d.keys()\n assert 'pore.foo.baz' in d.keys()\n\n def test_subdict_getitem_on_phase_from_two_physics(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n geo1 = op.geometry.GenericGeometry(network=pn,\n pores=pn.Ps[:75],\n throats=pn.Ts[:75])\n geo2 = op.geometry.GenericGeometry(network=pn,\n pores=pn.Ps[75:],\n throats=pn.Ts[75:])\n air = op.phases.GenericPhase(network=pn)\n phys1 = op.physics.GenericPhysics(network=pn, phase=air, geometry=geo1)\n phys2 
= op.physics.GenericPhysics(network=pn, phase=air, geometry=geo2)\n air['pore.foo.bar'] = 1\n air['pore.foo.baz'] = 2\n\n d = phys1['pore.foo']\n assert len(d) == 2\n assert 'pore.foo.bar' in d.keys()\n assert 'pore.foo.baz' in d.keys()\n assert d['pore.foo.bar'].size == geo1.Np\n assert d['pore.foo.baz'].size == geo1.Np\n d = phys2['pore.foo']\n assert len(d) == 2\n assert 'pore.foo.bar' in d.keys()\n assert 'pore.foo.baz' in d.keys()\n assert d['pore.foo.bar'].size == geo2.Np\n assert d['pore.foo.baz'].size == geo2.Np\n\n def test_subdict_getitem_on_one_geometry(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps,\n throats=pn.Ts)\n geo['pore.foo.bar'] = 1\n geo['pore.foo.baz'] = 2\n d = pn['pore.foo']\n assert len(d) == 2\n assert 'pore.foo.bar' in d.keys()\n assert 'pore.foo.baz' in d.keys()\n\n def test_subdict_getitem_on_two_geometries(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n geo1 = op.geometry.GenericGeometry(network=pn,\n pores=pn.Ps[:75],\n throats=pn.Ts[:75])\n geo2 = op.geometry.GenericGeometry(network=pn,\n pores=pn.Ps[75:],\n throats=pn.Ts[75:])\n geo1['pore.foo.bar'] = 1\n geo1['pore.foo.baz'] = 2\n d = pn['pore.foo']\n assert len(d) == 2\n assert 'pore.foo.bar' in d.keys()\n assert 'pore.foo.baz' in d.keys()\n assert np.any(np.isnan(d['pore.foo.bar']))\n assert np.any(np.isnan(d['pore.foo.baz']))\n geo2['pore.foo.bar'] = 1\n geo2['pore.foo.baz'] = 2\n d = pn['pore.foo']\n assert np.all(~np.isnan(d['pore.foo.bar']))\n assert np.all(~np.isnan(d['pore.foo.baz']))\n\n def test_subdict_lookup_errors(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n pn['pore.foo.bar'] = 1\n pn['pore.foo.baz'] = 2\n with pytest.raises(KeyError):\n _ = pn['pore.foo.b']\n with pytest.raises(KeyError):\n _ = pn['pore.fo']\n\n def test_set_label_add_to_pores(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n pn.set_label(label='tester', pores=[1, 2])\n assert pn['pore.tester'].sum() == 2\n\n def test_set_label_add_to_throats(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n pn.set_label(label='tester', throats=[1, 2])\n assert pn['throat.tester'].sum() == 2\n\n def test_set_label_overwrite_on_pores(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n pn.set_label(label='tester', pores=[1, 2])\n pn.set_label(label='tester', pores=[2, 3, 4], mode='overwrite')\n assert pn['pore.tester'].sum() == 3\n\n def test_set_label_overwrite_on_throats(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n pn.set_label(label='tester', throats=[1, 2])\n pn.set_label(label='tester', throats=[2, 3, 4], mode='overwrite')\n assert pn['throat.tester'].sum() == 3\n\n def test_set_label_remove_from_pores(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n pn.set_label(label='tester', pores=[1, 2])\n assert pn['pore.tester'].sum() == 2\n pn.set_label(label='tester', pores=[1, 2, 3], mode='remove')\n assert pn['pore.tester'].sum() == 0\n\n def test_set_label_remove_from_throats(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n pn.set_label(label='tester', throats=[1, 2])\n assert pn['throat.tester'].sum() == 2\n pn.set_label(label='tester', throats=[1, 2, 3], mode='remove')\n assert pn['throat.tester'].sum() == 0\n\n def test_set_label_purge_from_pores(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n pn.set_label(label='tester', pores=[1, 2])\n assert pn['pore.tester'].sum() == 2\n pn.set_label(label='tester', mode='purge')\n assert 'pore.tester' not in pn.keys()\n\n def test_set_label_purge_from_throats(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n 
pn.set_label(label='tester', throats=[1, 2])\n assert pn['throat.tester'].sum() == 2\n pn.set_label(label='tester', mode='purge')\n assert 'throat.tester' not in pn.keys()\n\n def test_set_label_purge_nonexistent_label(self):\n pn = op.network.Cubic(shape=[5, 5, 5])\n pn.set_label(label='tester', mode='purge')\n # Should only issue warning\n\n def test_model_run_when_data_missing(self):\n pn = op.network.Cubic(shape=[3, 3, 3])\n phase = op.phases.Air(network=pn, settings={'freeze_models': True})\n with pytest.raises(KeyError):\n a = phase['pore.viscosity']\n phase.settings['freeze_models'] = False\n a = phase['pore.viscosity']\n assert isinstance(a, np.ndarray)\n\n def test_renaming_to_current_name_is_allowed(self):\n obj = op.core.Base(name=\"temp\")\n obj.name = \"temp\"\n\n def test_object_names_must_be_unique_within_project(self):\n obj = op.core.Base(name=\"temp\")\n with pytest.raises(Exception):\n op.core.Base(name=\"temp\", project=obj.project)\n\n\nif __name__ == '__main__':\n\n t = BaseTest()\n self = t\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print('running test: '+item)\n t.__getattribute__(item)()\n",
"import openpnm as op\nimport openpnm.models.geometry.pore_surface_area as mods\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\n\nclass PoreSurfaceAreaTest:\n def setup_class(self):\n self.net = op.network.Cubic(shape=[5, 5, 5], spacing=1.0)\n self.geo = op.geometry.GenericGeometry(network=self.net,\n pores=self.net.Ps,\n throats=self.net.Ts)\n self.air = op.phases.Air(network=self.net)\n self.phys = op.physics.GenericPhysics(network=self.net,\n phase=self.air,\n geometry=self.geo)\n self.geo['pore.diameter'] = 1\n self.geo['throat.cross_sectional_area'] = 0.1\n\n def test_sphere(self):\n self.geo.add_model(propname='pore.surface_area',\n model=mods.sphere,\n regen_mode='normal')\n a = np.array([2.54159265, 2.64159265, 2.74159265, 2.84159265])\n b = np.unique(self.geo['pore.surface_area'])\n assert_allclose(a, b)\n\n def test_circle(self):\n self.geo.add_model(propname='pore.surface_area',\n model=mods.circle,\n regen_mode='normal')\n a = np.array([2.54159265, 2.64159265, 2.74159265, 2.84159265])\n b = np.unique(self.geo['pore.surface_area'])\n assert_allclose(a, b)\n\n def test_cube(self):\n self.geo.add_model(propname='pore.surface_area',\n model=mods.cube,\n regen_mode='normal')\n a = np.array([5.4, 5.5, 5.6, 5.7])\n b = np.unique(self.geo['pore.surface_area'])\n assert_allclose(a, b)\n\n def test_square(self):\n self.geo.add_model(propname='pore.surface_area',\n model=mods.square,\n regen_mode='normal')\n a = np.array([3.4, 3.5, 3.6, 3.7])\n b = np.unique(self.geo['pore.surface_area'])\n assert_allclose(a, b)\n\n\nif __name__ == '__main__':\n\n t = PoreSurfaceAreaTest()\n self = t\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print(f'Running test: {item}')\n t.__getattribute__(item)()\n",
"import pytest\nimport openpnm as op\nfrom numpy.testing import assert_allclose\nfrom numpy import pi\nimport numpy as np\n\n\nclass FlowShapeFactorsTest:\n def setup_class(self):\n self.net = op.network.Cubic(shape=[4, 4, 4])\n self.geo = op.geometry.GenericGeometry(network=self.net,\n pores=self.net.Ps,\n throats=self.net.Ts)\n self.phase = op.phases.GenericPhase(network=self.net)\n self.phys = op.physics.GenericPhysics(network=self.net,\n phase=self.phase,\n geometry=self.geo)\n self.geo['pore.diameter'] = 0.5\n self.geo['pore.area'] = pi/4*0.5**2\n self.geo['throat.diameter'] = 0.35\n self.geo['throat.area'] = pi/4*0.35**2\n self.geo['throat.conduit_lengths.pore1'] = 0.2\n self.geo['throat.conduit_lengths.throat'] = 0.7\n self.geo['throat.conduit_lengths.pore2'] = 0.15\n\n def test_ball_and_stick(self):\n mod = op.models.physics.flow_shape_factors.ball_and_stick\n self.phys.add_model(propname='throat.flow_shape_factors',\n model=mod)\n self.phys.regenerate_models()\n SF1 = self.phys['throat.flow_shape_factors.pore1'].mean()\n SF2 = self.phys['throat.flow_shape_factors.pore2'].mean()\n SFt = self.phys['throat.flow_shape_factors.throat'].mean()\n assert_allclose(SF1, desired=0.48180660)\n assert_allclose(SF2, desired=0.73590413)\n assert_allclose(SFt, desired=1.0)\n\n def test_ball_and_stick_raise_error_pore_size(self):\n self.setup_class()\n cn = self.net['throat.conns']\n L1 = self.geo['throat.conduit_lengths.pore1'][cn[:, 0][2]]\n self.geo['pore.diameter'][cn[:, 0][2]] = 0.5*L1\n mod = op.models.physics.flow_shape_factors.ball_and_stick\n with pytest.raises(Exception):\n self.phys.add_model(propname='throat.flow_shape_factors', model=mod)\n self.phys.regenerate_models()\n\n def test_ball_and_stick_equal_pore_and_throat_diameter(self):\n self.setup_class()\n self.geo['throat.diameter'] = 0.5\n self.geo['throat.area'] = pi/4*1**2\n mod = op.models.physics.flow_shape_factors.ball_and_stick\n self.phys.add_model(propname='throat.flow_shape_factors',\n model=mod)\n self.phys.regenerate_models()\n SF1 = self.phys['throat.flow_shape_factors.pore1'].mean()\n SF2 = self.phys['throat.flow_shape_factors.pore2'].mean()\n SFt = self.phys['throat.flow_shape_factors.throat'].mean()\n assert_allclose(SF1, desired=1.0)\n assert_allclose(SF2, desired=1.0)\n assert_allclose(SFt, desired=1.0)\n # Reverting changes\n self.geo['throat.diameter'] = 0.35\n self.geo['throat.area'] = pi/4*0.35**2\n\n def test_ball_and_stick_with_boundary_pores(self):\n self.setup_class()\n boundary_pores = [1, 8, 12, 55]\n conns = self.net['throat.conns']\n BP1 = np.in1d(conns[:, 0], boundary_pores)\n BP2 = np.in1d(conns[:, 1], boundary_pores)\n self.geo['throat.conduit_lengths.pore1'][BP1] = 0\n self.geo['throat.conduit_lengths.pore2'][BP2] = 0\n mod = op.models.physics.flow_shape_factors.ball_and_stick\n self.phys.add_model(propname='throat.flow_shape_factors',\n model=mod)\n self.phys.regenerate_models()\n SF1_BP = self.phys['throat.flow_shape_factors.pore1'][BP1].mean()\n SF2_BP = self.phys['throat.flow_shape_factors.pore2'][BP2].mean()\n assert_allclose(SF1_BP, desired=1.0)\n assert_allclose(SF2_BP, desired=1.0)\n # Reverting changes\n self.geo['throat.conduit_lengths.pore1'] = 0.2\n self.geo['throat.conduit_lengths.pore2'] = 0.15\n\n def test_conical_frustum_and_stick(self):\n self.setup_class()\n mod = op.models.physics.flow_shape_factors.conical_frustum_and_stick\n self.phys.add_model(propname='throat.flow_shape_factors',\n model=mod)\n self.phys.regenerate_models()\n SF1 = 
self.phys['throat.flow_shape_factors.pore1'].mean()\n SF2 = self.phys['throat.flow_shape_factors.pore2'].mean()\n SFt = self.phys['throat.flow_shape_factors.throat'].mean()\n assert_allclose(SF1, desired=0.469863013699)\n assert_allclose(SF2, desired=0.469863013699)\n assert_allclose(SFt, desired=1.0)\n\n def test_conical_frustum_and_stick_equal_pore_and_throat_diameter(self):\n self.setup_class()\n self.geo['throat.diameter'] = 0.5\n self.geo['throat.area'] = pi/4*1**2\n mod = op.models.physics.flow_shape_factors.conical_frustum_and_stick\n self.phys.add_model(propname='throat.flow_shape_factors',\n model=mod)\n self.phys.regenerate_models()\n SF1 = self.phys['throat.flow_shape_factors.pore1'].mean()\n SF2 = self.phys['throat.flow_shape_factors.pore2'].mean()\n SFt = self.phys['throat.flow_shape_factors.throat'].mean()\n assert_allclose(SF1, desired=1.0)\n assert_allclose(SF2, desired=1.0)\n assert_allclose(SFt, desired=1.0)\n # Reverting changes\n self.geo['throat.diameter'] = 0.35\n self.geo['throat.area'] = pi/4*0.35**2\n\n def test_conical_frustum_and_stick_with_boundary_pores(self):\n self.setup_class()\n boundary_pores = [1, 8, 12, 55]\n conns = self.net['throat.conns']\n BP1 = np.in1d(conns[:, 0], boundary_pores)\n BP2 = np.in1d(conns[:, 1], boundary_pores)\n self.geo['throat.conduit_lengths.pore1'][BP1] = 0\n self.geo['throat.conduit_lengths.pore2'][BP2] = 0\n mod = op.models.physics.flow_shape_factors.conical_frustum_and_stick\n self.phys.add_model(propname='throat.flow_shape_factors',\n model=mod)\n self.phys.regenerate_models()\n SF1_BP = self.phys['throat.flow_shape_factors.pore1'][BP1].mean()\n SF2_BP = self.phys['throat.flow_shape_factors.pore2'][BP2].mean()\n assert_allclose(SF1_BP, desired=1.0)\n assert_allclose(SF2_BP, desired=1.0)\n # Reverting changes\n self.geo['throat.conduit_lengths.pore1'] = 0.2\n self.geo['throat.conduit_lengths.pore2'] = 0.15\n\n\nif __name__ == '__main__':\n\n t = FlowShapeFactorsTest()\n self = t\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print('running test: '+item)\n t.__getattribute__(item)()\n",
"import numpy as np\nimport openpnm as op\n\n\nclass GenericNetworkTest:\n def setup_class(self):\n self.net = op.network.Cubic(shape=[10, 10, 10])\n\n def teardown_class(self):\n ws = op.Workspace()\n ws.clear()\n\n def test_find_connected_pores_numeric_not_flattend(self):\n a = self.net.find_connected_pores(throats=[0, 1])\n assert np.all(a.flatten() == [0, 1, 1, 2])\n\n def test_find_connected_pores_numeric_flattend(self):\n a = self.net.find_connected_pores(throats=[0, 1], flatten=True)\n assert np.all(a == [0, 1, 2])\n\n def test_find_connected_pores_boolean_flattend(self):\n Tind = np.zeros((self.net.Nt,), dtype=bool)\n Tind[[0, 1]] = True\n a = self.net.find_connected_pores(throats=Tind, flatten=True)\n assert np.all(a == [0, 1, 2])\n\n def test_find_connected_pores_empty_flattend(self):\n a = self.net.find_connected_pores(throats=[], flatten=True)\n assert np.shape(a) == (0, )\n\n def test_find_neighbor_pores_numeric(self):\n a = self.net.find_neighbor_pores(pores=[])\n assert np.size(a) == 0\n\n def test_find_neighbor_pores_boolean(self):\n Pind = np.zeros((self.net.Np,), dtype=bool)\n Pind[[0, 1]] = True\n a = self.net.find_neighbor_pores(pores=Pind)\n assert np.all(a == [2, 10, 11, 100, 101])\n\n def test_find_neighbor_pores_numeric_union(self):\n a = self.net.find_neighbor_pores(pores=[0, 2], mode='union')\n assert np.all(a == [1, 3, 10, 12, 100, 102])\n\n def test_find_neighbor_pores_numeric_intersection(self):\n a = self.net.find_neighbor_pores(pores=[0, 2], mode='xnor')\n assert np.all(a == [1])\n\n def test_find_neighbor_pores_numeric_exclusive_or(self):\n a = self.net.find_neighbor_pores(pores=[0, 2], mode='exclusive_or')\n assert np.all(a == [3, 10, 12, 100, 102])\n\n def test_find_neighbor_pores_numeric_union_include_input(self):\n a = self.net.find_neighbor_pores(pores=[0, 2], mode='or',\n include_input=True)\n assert np.all(a == [1, 3, 10, 12, 100, 102])\n a = self.net.find_neighbor_pores(pores=[0, 1], mode='or',\n include_input=True)\n assert np.all(a == [0, 1, 2, 10, 11, 100, 101])\n\n def test_find_neighbor_pores_numeric_intersection_include_input(self):\n a = self.net.find_neighbor_pores(pores=[0, 2], mode='and',\n include_input=True)\n assert np.all(a == [1])\n a = self.net.find_neighbor_pores(pores=[0, 1], mode='and',\n include_input=True)\n assert np.all(a == [])\n\n def test_find_neighbor_pores_numeric_intersection_exclude_input(self):\n a = self.net.find_neighbor_pores(pores=[0, 2], mode='and',\n include_input=False)\n assert np.all(a == [1])\n\n def test_find_neighbor_pores_numeric_exclusive_or_include_input(self):\n a = self.net.find_neighbor_pores(pores=[0, 2], mode='exclusive_or',\n include_input=True)\n assert np.all(a == [3, 10, 12, 100, 102])\n a = self.net.find_neighbor_pores(pores=[0, 1], mode='exclusive_or',\n include_input=True)\n assert np.all(a == [0, 1, 2, 10, 11, 100, 101])\n\n def test_find_neighbor_throats_on_pores_wo_throats(self):\n net = op.network.Cubic(shape=[10, 10, 1])\n ts = net.find_neighbor_throats(pores=net.Ps[-1])\n op.topotools.trim(net, throats=ts)\n ts2 = net.find_neighbor_throats(pores=99)\n assert ts2.size == 0\n\n def test_find_neighbor_throats_empty(self):\n a = self.net.find_neighbor_throats(pores=[])\n assert np.size(a) == 0\n\n def test_find_neighbor_throats_boolean(self):\n Pind = np.zeros((self.net.Np,), dtype=bool)\n Pind[[0, 1]] = True\n a = self.net.find_neighbor_throats(pores=Pind)\n assert np.all(a == [0, 1, 900, 901, 1800, 1801])\n\n def test_find_neighbor_throats_numeric_union(self):\n a = 
self.net.find_neighbor_throats(pores=[0, 2], mode='union')\n assert np.all(a == [0, 1, 2, 900, 902, 1800, 1802])\n\n def test_find_neighbor_throats_numeric_intersection(self):\n a = self.net.find_neighbor_throats(pores=[0, 2], mode='xnor')\n assert np.size(a) == 0\n\n def test_find_neighbor_throats_numeric_exclusive_or(self):\n a = self.net.find_neighbor_throats(pores=[0, 2],\n mode='exclusive_or')\n assert np.all(a == [0, 1, 2, 900, 902, 1800, 1802])\n\n def test_num_neighbors_empty(self):\n a = self.net.num_neighbors(pores=[])\n assert np.size(a) == 0\n\n def test_num_neighbors_pores_flattened(self):\n a = self.net.num_neighbors(pores=0, flatten=True)\n assert a == 3\n assert isinstance(a, int)\n a = self.net.num_neighbors(pores=[0, 2], flatten=True)\n assert a == 6\n assert isinstance(a, int)\n\n def test_num_neighbors_pores_with_modes(self):\n a = self.net.num_neighbors(pores=[0, 2], mode='union', flatten=True)\n assert a == 6\n a = self.net.num_neighbors(pores=[0, 2], mode='xnor',\n flatten=True)\n assert a == 1\n a = self.net.num_neighbors(pores=[0, 2], mode='exclusive_or',\n flatten=True)\n assert a == 5\n\n def test_num_neighbors_pores_not_flattened(self):\n a = self.net.num_neighbors(pores=[0, 2], flatten=False)\n assert np.all(a == [3, 4])\n a = self.net.num_neighbors(pores=0, flatten=False)\n assert np.all(a == [3])\n assert isinstance(a, np.ndarray)\n\n def test_find_nearby_pores_distance_1(self):\n a = self.net.find_nearby_pores(pores=[0, 1], r=1, flatten=False,\n include_input=True)\n b = self.net.find_neighbor_pores(pores=[0, 1], flatten=False,\n include_input=True)\n assert np.all([np.all(a[i] == b[i]) for i in range(0, len(a))])\n\n def test_find_nearby_pores_distance_2(self):\n a = self.net.find_nearby_pores(pores=[0, 1], r=2)\n assert np.all([np.size(a[i]) for i in [0, 1]] == [9, 13])\n\n def test_find_nearby_pores_distance_0(self):\n a = self.net.find_nearby_pores(pores=[0, 1], r=1e-9, flatten=False)\n assert np.shape(a) == (2, 0)\n a = self.net.find_nearby_pores(pores=[0, 1], r=1e-9, flatten=True)\n assert a.shape == (0,)\n\n def test_find_nearby_pores_distance_1_flattened(self):\n a = self.net.find_nearby_pores(pores=[0, 1], r=1, flatten=True)\n b = self.net.find_neighbor_pores(pores=[0, 1])\n assert np.all(a == b)\n\n def test_find_nearby_pores_distance_2_flattened(self):\n a = self.net.find_nearby_pores(pores=[0, 1], r=2, flatten=True)\n assert np.size(a) == 15\n\n def test_find_nearby_pores_distance_2_flattened_include_input(self):\n a = self.net.find_nearby_pores(pores=[0, 1], r=2,\n flatten=True, include_input=True)\n assert np.size(a) == 17\n assert np.all(np.in1d([0, 1], a))\n\n\nif __name__ == '__main__':\n\n t = GenericNetworkTest()\n t.setup_class()\n self = t\n for item in t.__dir__():\n if item.startswith('test'):\n print('running test: '+item)\n t.__getattribute__(item)()\n",
"import openpnm as op\nimport openpnm.models.geometry.pore_cross_sectional_area as mods\nimport numpy as np\nfrom numpy.testing import assert_approx_equal\n\n\nclass PoreAreaTest:\n def setup_class(self):\n self.net = op.network.Cubic(shape=[5, 5, 5], spacing=1.0)\n self.geo = op.geometry.GenericGeometry(network=self.net,\n pores=self.net.Ps,\n throats=self.net.Ts)\n self.air = op.phases.Air(network=self.net)\n self.phys = op.physics.GenericPhysics(network=self.net,\n phase=self.air,\n geometry=self.geo)\n self.geo['pore.diameter'] = 1.0\n self.geo['throat.area'] = 0.1\n\n def test_sphere(self):\n self.geo.add_model(propname='pore.area',\n model=mods.sphere,\n regen_mode='normal')\n a = np.array([0.78539816])\n b = np.unique(self.geo['pore.area'])\n assert_approx_equal(a, b)\n\n def test_cube(self):\n self.geo.add_model(propname='pore.area',\n model=mods.cube,\n regen_mode='normal')\n a = np.array([1.0])\n b = np.unique(self.geo['pore.area'])\n assert_approx_equal(a, b)\n\n def test_circle(self):\n self.geo.add_model(propname='pore.area',\n model=mods.circle,\n regen_mode='normal')\n a = np.array([1.0])\n b = np.unique(self.geo['pore.area'])\n assert_approx_equal(a, b)\n\n def test_square(self):\n self.geo.add_model(propname='pore.area',\n model=mods.square,\n regen_mode='normal')\n a = np.array([1.0])\n b = np.unique(self.geo['pore.area'])\n assert_approx_equal(a, b)\n\n\nif __name__ == '__main__':\n\n t = PoreAreaTest()\n self = t\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print('running test: '+item)\n t.__getattribute__(item)()\n"
] | [
[
"pandas.read_table",
"numpy.vstack"
],
[
"numpy.random.seed"
],
[
"numpy.reshape",
"numpy.set_printoptions",
"numpy.sort",
"numpy.ones",
"numpy.atleast_2d",
"numpy.fromstring",
"numpy.array2string",
"numpy.zeros"
],
[
"numpy.array",
"numpy.testing.assert_allclose"
],
[
"numpy.linspace",
"numpy.isnan",
"numpy.arange",
"numpy.in1d",
"numpy.ones",
"numpy.all",
"numpy.size",
"numpy.where",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.array",
"numpy.testing.assert_allclose",
"numpy.unique"
],
[
"numpy.in1d",
"numpy.testing.assert_allclose"
],
[
"numpy.in1d",
"numpy.all",
"numpy.size",
"numpy.shape",
"numpy.zeros"
],
[
"numpy.array",
"numpy.testing.assert_approx_equal",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
inestm28/si | [
"a82ba37bd628c5ebdc723f5e1a9894832c8f1a76",
"a82ba37bd628c5ebdc723f5e1a9894832c8f1a76"
] | [
"src/si/util/cv.py",
"src/si/data/dataset.py"
] | [
"from .util import train_test_split\nimport numpy as np\nimport itertools\n\n# MODEL SELECTION\n\nclass Cross_Validation:\n #avaliar a performance de um modelo\n def __init__(self, model, dataset,score=None, **kwargs):\n self.model=model #modelo que se quer avaliar\n self.dataset=dataset\n self.cv=kwargs.get('cv',3) #.get returns 3. number of folds (K-fold)\n self.split=kwargs.get('split', 0.8)\n self.train_scores=None\n self.test_scores=None\n self.ds=None\n self.score=score\n\n def run(self):\n train_scores = []\n test_scores = []\n ds=[] #lista com tuplos de conjuntos de treino e de teste\n for _ in range(self.cv): # 3 folds. underscore pq não vamos precisar do valor da variável\n train, test = train_test_split(self.dataset, self.split)\n ds.append((train, test))\n self.model.fit(train)\n if not self.score: #if self.score diferente de None então corre o ciclo\n train_scores.append(self.model.cost()) #cost -> dá a medida de quão longe o valor previsto está do output original\n test_scores.append(self.model.cost(test.X, test.y))\n else: #if self.score = None\n y_train=np.ma.apply_along_axis(self.model.predict, axis=0, arr=train.X.T)\n train_scores.append(self.score(train.y, y_train))\n y_test=np.ma.apply_along_axis(self.model.predict, axis=0, arr=test.X.T)\n test_scores.append(self.score(test.y, y_test))\n self.train_scores=train_scores\n self.test_scores=test_scores\n self.ds=ds\n return train_scores, test_scores #accuracies de cada fold\n\n def toDataframe(self):\n import pandas as pd\n assert self.train_scores and self.test_scores, 'Need to run code first'\n return pd.DataFrame({'Train Scores': self.train_scores, 'Test scores': self.test_scores})\n\nclass Grid_Search:\n #automatically selecting the best hyper parameteres for a particular model\n def __init__(self, model, dataset, parameters, **kwargs):\n self.model=model #modelo a ser avaliado\n self.dataset=dataset\n hasparam=[hasattr(self.model, param) for param in parameters] #hasattr() returns true if an object has the given named attribute, hasattr(object, name of attribute)\n if np.all(hasparam): #Test whether all array elements along a given axis evaluate to True.\n self.parameters=parameters #dictionary of all the parameters and their corresponding list of values that you want to test for best performance\n else:\n index=hasparam.index(False)\n keys=list(parameters.keys())\n raise ValueError(f\"wrong parameters: {keys[index]}\")\n self.kwargs=kwargs\n self.results=None\n\n def run(self):\n self.results=[]\n attrs=list(self.parameters.keys()) #nome dos parametros\n values=list(self.parameters.values()) #valores dos parametros\n for conf in itertools.product(*values): #itertools.product -> cartesian product of all the iterable provided as the argument.\n for i in range(len(attrs)):\n setattr(self.model, attrs[i], conf[i])\n scores=Cross_Validation(self.model, self.dataset, **self.kwargs).run() #faz CROSS VALIDATION\n self.results.append((conf, scores)) #para cada valor de parametro, dá as accuracies do modelo\n return self.results\n\n def toDataframe(self):\n import pandas as pd\n assert self.results, 'The grid search needs to be ran.'\n data=dict()\n for i, k in enumerate(self.parameters.keys()):\n v=[]\n for r in self.results:\n v.append(r[0][i])\n data[k]=v\n for i in range(len(self.results[0][1][0])):\n treino, teste = [], []\n for r in self.results:\n treino.append(r[1][0][i])\n teste.append(r[1][1][i])\n data['Train ' + str(i + 1)] = treino\n data['Test ' + str(i + 1)] = teste\n return pd.DataFrame(data)",
"import numpy as np\nfrom ..util.util import label_gen\n\n__all__ = ['Dataset']\n\n\nclass Dataset:\n def __init__(self, X=None, y=None,\n xnames: list = None,\n yname: str = None):\n \"\"\" Tabular Dataset\"\"\"\n if X is None:\n raise Exception(\"Trying to instanciate a DataSet without any data\")\n self.X = X\n self.y = y\n self._xnames = xnames if xnames else label_gen(X.shape[1])\n self._yname = yname if yname else 'y'\n\n @classmethod\n def from_data(cls, filename, sep=\",\", labeled=True):\n \"\"\"Creates a DataSet from a data file.\n\n :param filename: The filename\n :type filename: str\n :param sep: attributes separator, defaults to \",\"\n :type sep: str, optional\n :return: A DataSet object\n :rtype: DataSet\n \"\"\"\n data = np.genfromtxt(filename, delimiter=sep)\n if labeled:\n X = data[:, 0:-1]\n y = data[:, -1]\n else:\n X = data\n y = None\n return cls(X, y)\n\n @classmethod\n def from_dataframe(cls, df, ylabel=None):\n \"\"\"Creates a DataSet from a pandas dataframe.\n\n :param df: [description]\n :type df: [type]\n :param ylabel: [description], defaults to None\n :type ylabel: [type], optional\n :return: [description]\n :rtype: [type]\n \"\"\"\n\n if ylabel and ylabel in df.columns:\n X = df.loc[:, df.columns != ylabel].to_numpy()\n y = df.loc[:, ylabel].to_numpy()\n xnames = list(df.columns)\n xnames.remove(ylabel)\n yname = ylabel\n else:\n X = df.to_numpy()\n y = None\n xnames = list(df.columns)\n yname = None\n return cls(X, y, xnames, yname)\n\n def __len__(self):\n \"\"\"Returns the number of data points.\"\"\"\n return self.X.shape[0]\n\n def hasLabel(self):\n \"\"\"Returns True if the dataset constains labels (a dependent variable)\"\"\"\n return self.y is not None\n\n def getNumFeatures(self):\n \"\"\"Returns the number of features\"\"\"\n return self.X.shape[1]\n\n def getNumClasses(self):\n \"\"\"Returns the number of label classes or 0 if the dataset has no dependent variable.\"\"\"\n return len(np.unique(self.y)) if self.hasLabel() else 0\n\n def writeDataset(self, filename, sep=\",\"):\n \"\"\"Saves the dataset to a file\n\n :param filename: The output file path\n :type filename: str\n :param sep: The fields separator, defaults to \",\"\n :type sep: str, optional\n \"\"\"\n if self.y is not None:\n fullds = np.hstack((self.X, self.y.reshape(len(self.y), 1)))\n else:\n fullds = self.X\n np.savetxt(filename, fullds, delimiter=sep)\n\n def toDataframe(self):\n \"\"\" Converts the dataset into a pandas DataFrame\"\"\"\n import pandas as pd\n if self.y is not None:\n fullds = np.hstack((self.X, self.y.reshape(len(self.y), 1)))\n columns = self._xnames[:]+[self._yname]\n else:\n fullds = self.X.copy()\n columns = self._xnames[:]\n return pd.DataFrame(fullds, columns=columns)\n\n def getXy(self):\n return self.X, self.y\n\n\ndef summary(dataset, format='df'):\n \"\"\" Returns the statistics of a dataset(mean, std, max, min)\n\n :param dataset: A Dataset object\n :type dataset: si.data.Dataset\n :param format: Output format ('df':DataFrame, 'dict':dictionary ), defaults to 'df'\n :type format: str, optional\n \"\"\"\n if dataset.hasLabel():\n fullds = np.hstack((dataset.X, dataset.y.reshape(len(dataset.y), 1)))\n columns = dataset._xnames[:]+[dataset._yname]\n else:\n fullds = dataset.X\n columns = dataset._xnames[:]\n stats = {}\n for i in range(fullds.shape[1]):\n try:\n _means = np.mean(fullds[:, i], axis=0)\n _vars = np.var(fullds[:, i], axis=0)\n _maxs = np.max(fullds[:, i], axis=0)\n _mins = np.min(fullds[:, i], axis=0)\n except Exception:\n _means = _vars = 
_maxs = _mins = np.NAN\n stat = {'mean': _means,\n 'var': _vars,\n 'min': _mins,\n 'max': _maxs\n }\n stats[columns[i]] = stat\n if format == 'df':\n import pandas as pd\n df = pd.DataFrame(stats)\n return df\n else:\n return stats\n"
] | [
[
"numpy.all",
"numpy.ma.apply_along_axis",
"pandas.DataFrame"
],
[
"numpy.min",
"numpy.unique",
"numpy.var",
"pandas.DataFrame",
"numpy.genfromtxt",
"numpy.max",
"numpy.mean",
"numpy.savetxt"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
EmadAlamoudi/libpetab-python-MS | [
"7d21d79e9c02200d361a19c737d61c0e56123ca0"
] | [
"tests/test_visualization.py"
] | [
"import warnings\nfrom os import path\nfrom tempfile import TemporaryDirectory\nimport pytest\nfrom petab.C import *\nfrom petab.visualize import (plot_data_and_simulation,\n plot_measurements_by_observable,\n save_vis_spec)\nimport matplotlib.pyplot as plt\n\n\[email protected]\ndef data_file_Fujita():\n return \"doc/example/example_Fujita/Fujita_measurementData.tsv\"\n\n\[email protected]\ndef condition_file_Fujita():\n return \"doc/example/example_Fujita/Fujita_experimentalCondition.tsv\"\n\n\[email protected]\ndef data_file_Fujita_wrongNoise():\n return \"doc/example/example_Fujita/Fujita_measurementData_wrongNoise.tsv\"\n\n\[email protected]\ndef data_file_Fujita_nanData():\n return \"doc/example/example_Fujita/Fujita_measurementData_nanData.tsv\"\n\n\[email protected]\ndef simu_file_Fujita():\n return \"doc/example/example_Fujita/Fujita_simulatedData.tsv\"\n\n\[email protected]\ndef data_file_Fujita_minimal():\n return \"doc/example/example_Fujita/Fujita_measurementData_minimal.tsv\"\n\n\[email protected]\ndef visu_file_Fujita_small():\n return \"doc/example/example_Fujita/Fujita_visuSpec_small.tsv\"\n\n\[email protected]\ndef visu_file_Fujita_wo_dsid():\n return \"doc/example/example_Fujita/visuSpecs/Fujita_visuSpec_1.tsv\"\n\n\[email protected]\ndef visu_file_Fujita_minimal():\n return \"doc/example/example_Fujita/visuSpecs/Fujita_visuSpec_mandatory.tsv\"\n\n\[email protected]\ndef visu_file_Fujita_empty():\n return \"doc/example/example_Fujita/visuSpecs/Fujita_visuSpec_empty.tsv\"\n\n\[email protected]\ndef data_file_Isensee():\n return \"doc/example/example_Isensee/Isensee_measurementData.tsv\"\n\n\[email protected]\ndef condition_file_Isensee():\n return \"doc/example/example_Isensee/Isensee_experimentalCondition.tsv\"\n\n\[email protected]\ndef vis_spec_file_Isensee():\n return \"doc/example/example_Isensee/Isensee_visualizationSpecification.tsv\"\n\n\[email protected]\ndef simulation_file_Isensee():\n return \"doc/example/example_Isensee/Isensee_simulationData.tsv\"\n\n\ndef test_visualization_with_vis_and_sim(data_file_Isensee,\n condition_file_Isensee,\n vis_spec_file_Isensee,\n simulation_file_Isensee):\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n vis_spec_file_Isensee,\n simulation_file_Isensee)\n\n\ndef test_visualization_with_vis(data_file_Isensee,\n condition_file_Isensee,\n vis_spec_file_Isensee):\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n vis_spec_file_Isensee)\n\n\ndef test_visualization_small_visu_file_w_datasetid(data_file_Fujita,\n condition_file_Fujita,\n visu_file_Fujita_small):\n \"\"\"\n Test: visualization spezification file only with few columns in\n particular datasetId\n (optional columns are optional)\n \"\"\"\n plot_data_and_simulation(data_file_Fujita,\n condition_file_Fujita,\n visu_file_Fujita_small)\n\n\ndef test_visualization_small_visu_file_wo_datasetid(data_file_Fujita,\n condition_file_Fujita,\n visu_file_Fujita_wo_dsid):\n \"\"\"\n Test: visualization spezification file only with few columns in\n particular no datasetId column\n (optional columns are optional)\n \"\"\"\n plot_data_and_simulation(data_file_Fujita,\n condition_file_Fujita,\n visu_file_Fujita_wo_dsid)\n\n\ndef test_visualization_minimal_visu_file(data_file_Fujita,\n condition_file_Fujita,\n visu_file_Fujita_minimal):\n \"\"\"\n Test: visualization spezification file only with mandatory column plotId\n (optional columns are optional)\n \"\"\"\n plot_data_and_simulation(data_file_Fujita,\n condition_file_Fujita,\n 
visu_file_Fujita_minimal)\n\n\ndef test_visualization_empty_visu_file(data_file_Fujita,\n                                       condition_file_Fujita,\n                                       visu_file_Fujita_empty):\n    \"\"\"\n    Test: an empty visualization specification file should default to the routine\n    for no file at all\n    \"\"\"\n    plot_data_and_simulation(data_file_Fujita,\n                             condition_file_Fujita,\n                             visu_file_Fujita_empty)\n\n\ndef test_visualization_minimal_data_file(data_file_Fujita_minimal,\n                                         condition_file_Fujita,\n                                         visu_file_Fujita_small):\n    \"\"\"\n    Test visualization for the case where the data file has only the\n    mandatory columns\n    (optional columns are optional)\n    \"\"\"\n    plot_data_and_simulation(data_file_Fujita_minimal,\n                             condition_file_Fujita,\n                             visu_file_Fujita_small)\n\n\ndef test_visualization_with_dataset_list(data_file_Isensee,\n                                         condition_file_Isensee,\n                                         simulation_file_Isensee):\n    datasets = [['JI09_150302_Drg345_343_CycNuc__4_ABnOH_and_ctrl',\n                 'JI09_150302_Drg345_343_CycNuc__4_ABnOH_and_Fsk'],\n                ['JI09_160201_Drg453-452_CycNuc__ctrl',\n                 'JI09_160201_Drg453-452_CycNuc__Fsk',\n                 'JI09_160201_Drg453-452_CycNuc__Sp8_Br_cAMPS_AM']]\n    plot_data_and_simulation(data_file_Isensee,\n                             condition_file_Isensee,\n                             dataset_id_list=datasets)\n\n    plot_data_and_simulation(data_file_Isensee,\n                             condition_file_Isensee,\n                             sim_data=simulation_file_Isensee,\n                             dataset_id_list=datasets)\n\n\ndef test_visualization_without_datasets(data_file_Fujita,\n                                        condition_file_Fujita,\n                                        simu_file_Fujita):\n    sim_cond_num_list = [[0, 1, 2], [0, 2, 3], [0, 3, 4], [0, 4, 5]]\n    sim_cond_id_list = [['model1_data1'], ['model1_data2', 'model1_data3'],\n                        ['model1_data4', 'model1_data5'], ['model1_data6']]\n    observable_num_list = [[0], [1], [2], [0, 2], [1, 2]]\n    observable_id_list = [['pS6_tot'], ['pEGFR_tot'], ['pAkt_tot']]\n\n    plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n                             sim_cond_num_list=sim_cond_num_list,\n                             plotted_noise=PROVIDED)\n    plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n                             sim_data=simu_file_Fujita,\n                             sim_cond_num_list=sim_cond_num_list,\n                             plotted_noise=PROVIDED)\n\n    plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n                             sim_cond_id_list=sim_cond_id_list)\n    plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n                             sim_data=simu_file_Fujita,\n                             sim_cond_id_list=sim_cond_id_list)\n\n    plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n                             observable_num_list=observable_num_list)\n    plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n                             sim_data=simu_file_Fujita,\n                             observable_num_list=observable_num_list)\n\n    plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n                             observable_id_list=observable_id_list,\n                             plotted_noise=PROVIDED)\n    plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n                             sim_data=simu_file_Fujita,\n                             observable_id_list=observable_id_list,\n                             plotted_noise=PROVIDED)\n\n\ndef test_visualization_omit_empty_datasets(data_file_Fujita_nanData,\n                                           condition_file_Fujita):\n    observable_num_list = [[0, 1]]\n    plot_data_and_simulation(data_file_Fujita_nanData, condition_file_Fujita,\n                             observable_num_list=observable_num_list)\n\n\ndef test_visualization_raises(data_file_Fujita,\n                              condition_file_Fujita,\n                              data_file_Fujita_wrongNoise):\n    sim_cond_num_list = [[0, 1, 2], [0, 2, 3], [0, 3, 4], [0, 4, 5]]\n    sim_cond_id_list = [['model1_data1'], ['model1_data2', 'model1_data3'],\n                        ['model1_data4', 'model1_data5'], ['model1_data6']]\n    observable_num_list = [[0], [1], [2], [0, 2], [1, 2]]\n    observable_id_list = [['pS6_tot'], ['pEGFR_tot'], ['pAkt_tot']]\n    error_counter = 0\n\n    # Combining 
simulation condition numbers and IDs should not be allowed\n try:\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n sim_cond_num_list=sim_cond_num_list,\n sim_cond_id_list=sim_cond_id_list)\n except NotImplementedError as ErrMsg:\n assert(ErrMsg.args[0] == 'Either specify a list of simulation '\n 'condition IDs or a list of simulation '\n 'condition numbers, but not both. '\n 'Stopping.')\n error_counter += 1\n assert (error_counter == 1)\n\n # Combining observable numbers and IDs should not be allowed\n try:\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n observable_num_list=observable_num_list,\n observable_id_list=observable_id_list)\n except NotImplementedError as ErrMsg:\n assert(ErrMsg.args[0] == 'Either specify a list of observable IDs or '\n 'a list of observable numbers, but not both. '\n 'Stopping.')\n error_counter += 1\n assert (error_counter == 2)\n\n # Combining observable and simulation conditions numbers or IDs should not\n # be allowed\n try:\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n sim_cond_num_list=observable_num_list,\n observable_num_list=observable_num_list)\n except NotImplementedError as ErrMsg:\n assert(ErrMsg.args[0] == 'Plotting without visualization specification'\n ' file and datasetId can be performed via '\n 'grouping by simulation conditions OR '\n 'observables, but not both. Stopping.')\n error_counter += 1\n assert (error_counter == 3)\n try:\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n sim_cond_id_list=observable_id_list,\n observable_id_list=observable_id_list)\n except NotImplementedError as ErrMsg:\n assert(ErrMsg.args[0] == 'Plotting without visualization specification'\n ' file and datasetId can be performed via '\n 'grouping by simulation conditions OR '\n 'observables, but not both. Stopping.')\n error_counter += 1\n assert (error_counter == 4)\n\n # If no numerical noise is provided, it should not work to plot it\n try:\n plot_measurements_by_observable(data_file_Fujita_wrongNoise,\n condition_file_Fujita,\n plotted_noise='provided')\n except NotImplementedError as ErrMsg:\n assert(ErrMsg.args[0] == \"No numerical noise values provided in the \"\n \"measurement table. 
Stopping.\")\n error_counter += 1\n\n assert (error_counter == 5)\n\n\ndef test_visualization_warnings(data_file_Isensee, condition_file_Isensee):\n datasets = [['JI09_150302_Drg345_343_CycNuc__4_ABnOH_and_ctrl',\n 'JI09_150302_Drg345_343_CycNuc__4_ABnOH_and_Fsk'],\n ['JI09_160201_Drg453-452_CycNuc__ctrl',\n 'JI09_160201_Drg453-452_CycNuc__Fsk',\n 'JI09_160201_Drg453-452_CycNuc__Sp8_Br_cAMPS_AM']]\n sim_cond_num_list = [[0, 1, 2], [0, 2, 3], [0, 3, 4], [0, 4, 5]]\n observable_num_list = [[0], [1], [2], [0, 2], [1, 2]]\n\n # close open figures to avoid runtime warnings\n plt.close(\"all\")\n\n with warnings.catch_warnings(record=True) as warnMsg:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n # plotting with datasetIds and sim conditions should issue a warning\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n dataset_id_list=datasets,\n sim_cond_num_list=sim_cond_num_list)\n\n # plotting with datasetIds and observables should issue a warning\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n dataset_id_list=datasets,\n observable_num_list=observable_num_list)\n\n # plotting with datasetIds and observables and sim conditions should\n # issue a warning\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n dataset_id_list=datasets,\n observable_num_list=observable_num_list,\n sim_cond_num_list=sim_cond_num_list)\n\n # plotting grouped by something else than datasetIds should issue a\n # warning if datasetsIDs would have been available\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n sim_cond_num_list=sim_cond_num_list)\n\n # test correct number of warnings\n warnings_list = [msg for msg in warnMsg if\n not issubclass(msg.category, DeprecationWarning)]\n assert len(warnings_list) == 4\n\n # test that all warnings were indeed UserWarnings\n for i_warn in warnings_list:\n assert issubclass(i_warn.category, UserWarning)\n\n\ndef test_simple_visualization(data_file_Fujita, condition_file_Fujita):\n plot_measurements_by_observable(data_file_Fujita, condition_file_Fujita)\n plot_measurements_by_observable(data_file_Fujita, condition_file_Fujita,\n plotted_noise=PROVIDED)\n\n\ndef test_save_plots_to_file(data_file_Isensee, condition_file_Isensee,\n vis_spec_file_Isensee, simulation_file_Isensee):\n with TemporaryDirectory() as temp_dir:\n plot_data_and_simulation(\n data_file_Isensee,\n condition_file_Isensee,\n vis_spec_file_Isensee,\n simulation_file_Isensee,\n subplot_file_path=temp_dir)\n\n\ndef test_save_visu_file(data_file_Isensee,\n condition_file_Isensee):\n\n with TemporaryDirectory() as temp_dir:\n save_vis_spec(data_file_Isensee,\n condition_file_Isensee,\n output_file_path=path.join(temp_dir, \"visuSpec.tsv\"))\n\n datasets = [['JI09_150302_Drg345_343_CycNuc__4_ABnOH_and_ctrl',\n 'JI09_150302_Drg345_343_CycNuc__4_ABnOH_and_Fsk'],\n ['JI09_160201_Drg453-452_CycNuc__ctrl',\n 'JI09_160201_Drg453-452_CycNuc__Fsk',\n 'JI09_160201_Drg453-452_CycNuc__Sp8_Br_cAMPS_AM']]\n\n save_vis_spec(data_file_Isensee,\n condition_file_Isensee,\n dataset_id_list=datasets,\n output_file_path=path.join(temp_dir, \"visuSpec1.tsv\"))\n"
] | [
[
"matplotlib.pyplot.close"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jeffersonHsieh/tapas | [
"a2f1c8c763c08487bed6b91884dac946dd766ab9",
"a2f1c8c763c08487bed6b91884dac946dd766ab9"
] | [
"tapas/utils/tf_example_utils.py",
"tapas/retrieval/tfidf_baseline_utils.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python3\n\"\"\"Utilities for converting interactions to TF examples.\"\"\"\n\nimport collections\nimport dataclasses\nimport hashlib\nimport random\nfrom typing import Iterable, List, Mapping, Optional, Text, Tuple\n\nfrom absl import logging\nfrom apache_beam import metrics as beam_metrics\nfrom tapas.protos import annotated_text_pb2\nfrom tapas.protos import interaction_pb2\nfrom tapas.protos import table_selection_pb2\nfrom tapas.utils import constants\nfrom tapas.utils import interpretation_utils\nfrom tapas.utils import number_annotation_utils\nfrom tapas.utils import sentence_tokenizer\nfrom tapas.utils import text_index\nfrom tapas.utils import text_utils\nimport tensorflow.compat.v1 as tf\n\nfrom official.nlp.bert import tokenization\n\n_NS = 'main'\n_CLS = '[CLS]'\n_EMPTY = '[EMPTY]'\n_MASK = '[MASK]'\n_SEP = '[SEP]'\n_NAN = float('nan')\n_MAX_NUM_CANDIDATES = 1000\n_MAX_NUM_ROWS = 32\n_WP_PER_CELL = 1.5\n_MAX_INDEX_LENGTH = int(_MAX_NUM_CANDIDATES * _MAX_NUM_ROWS * _WP_PER_CELL)\n_MAX_INT = 2**32 - 1\n\n\[email protected](frozen=True)\nclass Token:\n original_text: Text\n piece: Text\n\n\[email protected](frozen=True)\nclass TrainingInstance:\n tokens: List[Token]\n segment_ids: List[int]\n column_ids: List[int]\n row_ids: List[int]\n masked_lm_positions: List[int]\n masked_lm_labels: List[Text]\n is_random_table: bool\n\n\[email protected](frozen=True)\nclass TokenCoordinates:\n column_index: int\n row_index: int\n token_index: int\n\n\[email protected]\nclass TokenizedTable:\n rows: List[List[List[Token]]]\n selected_tokens: List[TokenCoordinates]\n\n\[email protected](frozen=True)\nclass MaskedLmInstance:\n index: int\n label: Text\n\n\[email protected](frozen=True)\nclass ConversionConfig:\n \"\"\"Configues conversion to TF example.\n\n vocab_file: Bert vocab file\n max_seq_length: Max length of a sequence in word pieces.\n max_column_id: Max column id to extract.\n max_row_id: Max row id to extract.\n \"\"\"\n vocab_file: Text\n max_seq_length: int\n max_column_id: int\n max_row_id: int\n strip_column_names: bool\n\n\[email protected](frozen=True)\nclass PretrainConversionConfig(ConversionConfig):\n \"\"\"Configures options speciic to pretraining data creation.\n\n max_predictions_per_seq: Max predictions per sequence for mask task.\n min_question_length: Min question length.\n max_question_length: Max question length.\n always_continue_cells: If true always mask entire cells.\n strip_column_names: If true, add empty strings instead of column names.\n random_seed: Random seed.\n masked_lm_prob: Percentage of tokens to mask.\n concatenate_snippets: If true concatenate snippets in a random fashion.\n \"\"\"\n max_predictions_per_seq: int\n masked_lm_prob: float\n random_seed: int\n min_question_length: int\n max_question_length: int\n always_continue_cells: bool\n concatenate_snippets: bool = True\n\n\[email protected](frozen=True)\nclass 
TrimmedConversionConfig(ConversionConfig):\n  # if > 0: Trim cells so that the length is <= this value.\n  # Also disables further cell trimming and should thus be used with\n  # 'drop_rows_to_fit' below.\n  # TODO(thomasmueller) Make this a parameter of the base config.\n  # TODO(thomasmueller) Consider giving this a better name.\n  cell_trim_length: int = -1\n\n\n@dataclasses.dataclass(frozen=True)\nclass ClassifierConversionConfig(TrimmedConversionConfig):\n  \"\"\"The config used to extract the tf examples for the classifier model.\"\"\"\n  add_aggregation_candidates: bool = False\n  expand_entity_descriptions: bool = False\n  use_entity_title: bool = False\n  entity_descriptions_sentence_limit: int = 5\n  use_document_title: bool = False\n  # Re-computes answer coordinates from the answer text.\n  update_answer_coordinates: bool = False\n  # Drop last rows if table doesn't fit within max sequence length.\n  drop_rows_to_fit: bool = False\n  # If true, adds the context heading of the table to the question.\n  use_context_title: bool = False\n  # For TPU prediction we serialize strings into a fixed length.\n  trim_question_ids: bool = False\n  # For each data split how to up/down sample the dataset\n  label_sampling_rate: Mapping[Tuple[Text, int],\n                               float] = dataclasses.field(default_factory=dict)\n  is_multi_hop: bool = False\n  # self._is_multi_hop = config.is_multi_hop\n  use_bridge_entity: bool = False\n  # self._use_bridge_entity = config.use_bridge_entity\n  use_question_type: bool = False\n  # self._use_question_type = config.use_question_type\n\n\n@dataclasses.dataclass(frozen=True)\nclass RetrievalConversionConfig(TrimmedConversionConfig):\n  use_document_title: bool = True\n  use_section_title: bool = False\n  use_caption: bool = False\n  use_abbv: bool = False\n  use_header: bool = True\n  use_content: bool = True\n  oracle_abbv_expansion: bool = False\n\n\n@dataclasses.dataclass(frozen=True)\nclass SerializedExample:\n  tokens: List[Token]\n  column_ids: List[int]\n  row_ids: List[int]\n  segment_ids: List[int]\n\n\ndef copy_vocab(input_vocab, output_vocab):\n  \"\"\"Copies the vocabulary file and adds the [EMPTY] token.\"\"\"\n  with tf.io.gfile.GFile(input_vocab) as input_vocab_file:\n    with tf.io.gfile.GFile(output_vocab, 'w') as output_vocab_file:\n      for token in input_vocab_file:\n        output_vocab_file.write('[EMPTY]\\n' if token ==\n                                '[unused0]\\n' else token)\n\n\ndef _get_pieces(tokens):\n  return (token.piece for token in tokens)\n\n\ndef fingerprint(text):\n  return int(hashlib.sha256(text.encode('utf-8')).hexdigest(), 16)\n\n\ndef create_int_feature(values):\n  return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n\n\ndef create_float_feature(values):\n  return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n\n\ndef create_string_feature(values):\n  return tf.train.Feature(bytes_list=tf.train.BytesList(value=list(values)))\n\n\ndef _is_inner_wordpiece(token):\n  return token.piece.startswith('##')\n\n\ndef _get_cell_token_indexes(column_ids, row_ids,\n                            column_id, row_id):\n  for index in range(len(column_ids)):\n    if (column_ids[index] - 1 == column_id and row_ids[index] - 1 == row_id):\n      yield index\n\n\ndef _get_buckets(value, buckets, name):\n  for bucket_value in buckets:\n    if value <= bucket_value:\n      return '%s: <= %d' % (name, bucket_value)\n  return '%s: < inf' % (name)\n\n\ndef _get_all_answer_ids_from_coordinates(\n    column_ids,\n    row_ids,\n    answers_list,\n):\n  \"\"\"Maps lists of answer coordinates to token indexes.\"\"\"\n  answer_ids = [0] * len(column_ids)\n  found_answers = set()\n  all_answers = 
set()\n for answers in answers_list:\n for column_index, row_index in answers:\n all_answers.add((column_index, row_index))\n for index in _get_cell_token_indexes(column_ids, row_ids, column_index,\n row_index):\n found_answers.add((column_index, row_index))\n answer_ids[index] = 1\n\n missing_count = len(all_answers) - len(found_answers)\n buckets = [1, 2, 3, 4, 5, 10, 25, 50, 100]\n if missing_count:\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(missing_count, buckets, 'Missing answers')).inc()\n if found_answers:\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(len(found_answers), buckets, 'Found answers')).inc()\n return answer_ids, missing_count\n\n\ndef _get_all_answer_ids(\n column_ids,\n row_ids,\n questions,\n):\n \"\"\"Maps lists of questions with answer coordinates to token indexes.\"\"\"\n\n def _to_coordinates(\n question,):\n return [(coords.column_index, coords.row_index)\n for coords in question.answer.answer_coordinates]\n\n return _get_all_answer_ids_from_coordinates(\n column_ids,\n row_ids,\n answers_list=(_to_coordinates(question) for question in questions),\n )\n\n\ndef _find_tokens(text, segment):\n \"\"\"Return start index of segment in text or None.\"\"\"\n logging.info('text: %s %s', text, segment)\n for index in range(1 + len(text) - len(segment)):\n for seg_index, seg_token in enumerate(segment):\n if text[index + seg_index].piece != seg_token.piece:\n break\n else:\n return index\n return None\n\n\ndef _find_answer_coordinates_from_answer_text(\n tokenized_table,\n answer_text,\n):\n \"\"\"Returns all occurrences of answer_text in the table.\"\"\"\n logging.info('answer text: %s', answer_text)\n for row_index, row in enumerate(tokenized_table.rows):\n if row_index == 0:\n # We don't search for answers in the header.\n continue\n for col_index, cell in enumerate(row):\n token_index = _find_tokens(cell, answer_text)\n if token_index is not None:\n yield TokenCoordinates(\n row_index=row_index,\n column_index=col_index,\n token_index=token_index,\n )\n\n\ndef _find_answer_ids_from_answer_texts(\n column_ids,\n row_ids,\n tokenized_table,\n answer_texts,\n):\n \"\"\"Maps question with answer texts to the first matching token indexes.\"\"\"\n answer_ids = [0] * len(column_ids)\n for answer_text in answer_texts:\n found_answer_text = False\n found_answer_text_ids = False\n for coordinates in _find_answer_coordinates_from_answer_text(\n tokenized_table,\n answer_text,\n ):\n found_answer_text = True\n # Maps answer coordinates to indexes this can fail if tokens / rows have\n # been pruned.\n indexes = list(\n _get_cell_token_indexes(\n column_ids,\n row_ids,\n column_id=coordinates.column_index,\n row_id=coordinates.row_index - 1,\n ))\n indexes.sort()\n coordinate_answer_ids = []\n if indexes:\n begin_index = coordinates.token_index + indexes[0]\n end_index = begin_index + len(answer_text)\n for index in indexes:\n if index >= begin_index and index < end_index:\n coordinate_answer_ids.append(index)\n if len(coordinate_answer_ids) == len(answer_text):\n found_answer_text_ids = True\n for index in coordinate_answer_ids:\n answer_ids[index] = 1\n break\n beam_metrics.Metrics.counter(_NS, 'Answer texts: total').inc()\n if found_answer_text:\n beam_metrics.Metrics.counter(_NS, 'Answer texts: found').inc()\n if found_answer_text_ids:\n beam_metrics.Metrics.counter(_NS, 'Answer texts: found ids').inc()\n\n return answer_ids\n\n\ndef _get_answer_ids(column_ids, row_ids,\n question):\n \"\"\"Maps answer coordinates to token indexes.\"\"\"\n answer_ids, missing_count 
= _get_all_answer_ids(column_ids, row_ids,\n [question])\n\n if missing_count:\n raise ValueError(\"Couldn't find all answers\")\n return answer_ids\n\n\ndef _get_annotation_name(identifier):\n \"\"\"Extracts the clean title from a Wikipedia identifier.\"\"\"\n # Example input: /wiki/New_York_City -> New York City\n return identifier.split('/')[-1].replace('_', ' ')\n\n\ndef _add_entity_descriptions_to_table(\n question,\n descriptions,\n table,\n use_entity_title,\n num_results,\n):\n \"\"\"Expand table cells with the descriptions of the entities mentioned.\n\n This function will add entity descriptions inside the Table proto by expanding\n the content of each cell according to entities mentioned in that cell. The\n sentences in the descriptions will be ranked by similarity to the question and\n only the top results will be included.\n\n Args:\n question: Question proto containing the question text. The text will be used\n to filter only a subset of the descriptions using a similarity criteria.\n descriptions: A map that contains for entity id, its textual description.\n table: Table to be modified in-place. Some cells may contain annotation\n extensions with entity ids that will be expanded with their descriptions.\n use_entity_title: Prepend the entity title to entity descriptions.\n num_results: Limit on the number of entities to expand with a description.\n \"\"\"\n descriptions = {\n key: sentence_tokenizer.tokenize(description)\n for key, description in descriptions.items()\n }\n documents = []\n for sentences in descriptions.values():\n for sentence in sentences:\n documents.append(sentence)\n\n search_results = text_index.TextIndex(documents).search(\n question.text, num_results=num_results)\n logging.log_first_n(logging.INFO,\n '%s selected entity annotations for %s: %s', 100,\n question.id, question.text, search_results)\n search_results_set = {r.text for r in search_results}\n\n buckets = [1, 2, 3, 4, 5, 10, 25, 50, 100]\n sentences_kept = len(search_results_set)\n sentences_discarded = len(documents) - sentences_kept\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(sentences_kept, buckets, 'Descriptions kept')).inc()\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(sentences_discarded, buckets,\n 'Descriptions discarded')).inc()\n\n annotated_cell_ext = annotated_text_pb2.AnnotatedText.annotated_cell_ext\n for row in table.rows:\n for cell in row.cells:\n if annotated_cell_ext in cell.Extensions:\n for annotation in cell.Extensions[annotated_cell_ext].annotations:\n sentences = descriptions[annotation.identifier]\n filtered_sentences = ' '.join(\n sent for sent in sentences if sent in search_results_set)\n if filtered_sentences:\n if use_entity_title:\n annotation_name = _get_annotation_name(annotation.identifier)\n cell.text += f' ( {annotation_name} : {filtered_sentences} )'\n else:\n cell.text += f' ( {filtered_sentences} )'\n\n\nclass TapasTokenizer:\n \"\"\"Wraps a Bert tokenizer.\"\"\"\n\n def __init__(self, vocab_file):\n self._basic_tokenizer = tokenization.BasicTokenizer(do_lower_case=True)\n self._wp_tokenizer = tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=True)\n\n def get_vocab(self):\n return self._wp_tokenizer.vocab.keys()\n\n def tokenize(self, text):\n if text_utils.format_text(text) == constants.EMPTY_TEXT:\n return [Token(_EMPTY, _EMPTY)]\n tokens = []\n for token in self._basic_tokenizer.tokenize(text):\n for piece in self._wp_tokenizer.tokenize(token):\n tokens.append(Token(token, piece))\n return tokens\n\n def 
convert_tokens_to_ids(self, word_pieces):\n return self._wp_tokenizer.convert_tokens_to_ids(word_pieces)\n\n def question_encoding_cost(self, question_tokens):\n # Two extra spots of SEP and CLS.\n return len(question_tokens) + 2\n\n\nclass ToTensorflowExampleBase:\n \"\"\"Base class for converting interactions to TF examples.\"\"\"\n\n def __init__(self, config):\n self._max_seq_length = config.max_seq_length\n self._max_column_id = config.max_column_id\n self._max_row_id = config.max_row_id\n self._strip_column_names = config.strip_column_names\n self._tokenizer = TapasTokenizer(config.vocab_file)\n\n def _tokenize_table(\n self,\n table,\n ):\n \"\"\"Runs tokenizer over columns and table cell texts.\"\"\"\n tokenized_rows = []\n tokenized_row = []\n for column in table.columns:\n if self._strip_column_names:\n tokenized_row.append(self._tokenizer.tokenize(''))\n else:\n tokenized_row.append(self._tokenizer.tokenize(column.text))\n tokenized_rows.append(tokenized_row)\n\n for row in table.rows:\n tokenized_row = []\n for cell in row.cells:\n tokenized_row.append(self._tokenizer.tokenize(cell.text))\n tokenized_rows.append(tokenized_row)\n\n token_coordinates = []\n for row_index, row in enumerate(tokenized_rows):\n for column_index, cell in enumerate(row):\n for token_index, _ in enumerate(cell):\n token_coordinates.append(\n TokenCoordinates(\n row_index=row_index,\n column_index=column_index,\n token_index=token_index,\n ))\n\n return TokenizedTable(\n rows=tokenized_rows,\n selected_tokens=token_coordinates,\n )\n\n def _get_table_values(self, table, num_columns,\n num_rows,\n num_tokens):\n \"\"\"Iterates over partial table and returns token, col. and row indexes.\"\"\"\n for tc in table.selected_tokens:\n # First row is header row.\n if tc.row_index >= num_rows + 1:\n continue\n if tc.column_index >= num_columns:\n continue\n cell = table.rows[tc.row_index][tc.column_index]\n token = cell[tc.token_index]\n word_begin_index = tc.token_index\n # Don't add partial words. 
Find the starting word piece and check if it\n # fits in the token budget.\n while (word_begin_index >= 0 and\n _is_inner_wordpiece(cell[word_begin_index])):\n word_begin_index -= 1\n if word_begin_index >= num_tokens:\n continue\n yield token, tc.column_index + 1, tc.row_index\n\n def _serialize_text(\n self, question_tokens\n ):\n \"\"\"Serialzes texts in index arrays.\"\"\"\n tokens = []\n segment_ids = []\n column_ids = []\n row_ids = []\n\n tokens.append(Token(_CLS, _CLS))\n segment_ids.append(0)\n column_ids.append(0)\n row_ids.append(0)\n\n for token in question_tokens:\n tokens.append(token)\n segment_ids.append(0)\n column_ids.append(0)\n row_ids.append(0)\n\n return tokens, segment_ids, column_ids, row_ids\n\n def _serialize(\n self,\n question_tokens,\n table,\n num_columns,\n num_rows,\n num_tokens,\n ):\n \"\"\"Serializes table and text.\"\"\"\n tokens, segment_ids, column_ids, row_ids = self._serialize_text(\n question_tokens)\n\n tokens.append(Token(_SEP, _SEP))\n segment_ids.append(0)\n column_ids.append(0)\n row_ids.append(0)\n\n for token, column_id, row_id in self._get_table_values(\n table, num_columns, num_rows, num_tokens):\n tokens.append(token)\n segment_ids.append(1)\n column_ids.append(column_id)\n row_ids.append(row_id)\n\n return SerializedExample(\n tokens=tokens,\n segment_ids=segment_ids,\n column_ids=column_ids,\n row_ids=row_ids,\n )\n\n def _tokenize(self, text):\n return self._tokenizer.tokenize(text)\n\n def _get_token_budget(self, question_tokens):\n return self._max_seq_length - self._tokenizer.question_encoding_cost(\n question_tokens)\n\n def _get_table_boundaries(self,\n table):\n \"\"\"Return maximal number of rows, columns and tokens.\"\"\"\n max_num_tokens = 0\n max_num_columns = 0\n max_num_rows = 0\n for tc in table.selected_tokens:\n max_num_columns = max(max_num_columns, tc.column_index + 1)\n max_num_rows = max(max_num_rows, tc.row_index + 1)\n max_num_tokens = max(max_num_tokens, tc.token_index + 1)\n max_num_columns = min(self._max_column_id, max_num_columns)\n max_num_rows = min(self._max_row_id, max_num_rows)\n return max_num_rows, max_num_columns, max_num_tokens\n\n def _get_table_cost(self, table, num_columns,\n num_rows, num_tokens):\n return sum(1 for _ in self._get_table_values(table, num_columns, num_rows,\n num_tokens))\n\n def _get_column_values(\n self, table,\n col_index):\n table_numeric_values = {}\n for row_index, row in enumerate(table.rows):\n cell = row.cells[col_index]\n if cell.HasField('numeric_value'):\n table_numeric_values[row_index] = cell.numeric_value\n return table_numeric_values\n\n def _add_numeric_column_ranks(self, column_ids, row_ids,\n table,\n features):\n \"\"\"Adds column ranks for all numeric columns.\"\"\"\n\n ranks = [0] * len(column_ids)\n inv_ranks = [0] * len(column_ids)\n\n if table:\n for col_index in range(len(table.columns)):\n table_numeric_values = self._get_column_values(table, col_index)\n if not table_numeric_values:\n continue\n\n try:\n key_fn = number_annotation_utils.get_numeric_sort_key_fn(\n table_numeric_values.values())\n except ValueError:\n continue\n\n table_numeric_values = {\n row_index: key_fn(value)\n for row_index, value in table_numeric_values.items()\n }\n\n table_numeric_values_inv = collections.defaultdict(list)\n for row_index, value in table_numeric_values.items():\n table_numeric_values_inv[value].append(row_index)\n\n unique_values = sorted(table_numeric_values_inv.keys())\n\n for rank, value in enumerate(unique_values):\n for row_index in 
table_numeric_values_inv[value]:\n for index in _get_cell_token_indexes(column_ids, row_ids, col_index,\n row_index):\n ranks[index] = rank + 1\n inv_ranks[index] = len(unique_values) - rank\n\n features['column_ranks'] = create_int_feature(ranks)\n features['inv_column_ranks'] = create_int_feature(inv_ranks)\n\n def _get_numeric_sort_key_fn(self, table_numeric_values, value):\n \"\"\"Returns the sort key function for comparing value to table values.\n\n The function returned will be a suitable input for the key param of the\n sort(). See number_annotation_utils._get_numeric_sort_key_fn for details.\n\n Args:\n table_numeric_values: Numeric values of a column\n value: Numeric value in the question.\n\n Returns:\n A function key function to compare column and question values.\n\n \"\"\"\n if not table_numeric_values:\n return None\n all_values = list(table_numeric_values.values())\n all_values.append(value)\n try:\n return number_annotation_utils.get_numeric_sort_key_fn(all_values)\n except ValueError:\n return None\n\n def _add_numeric_relations(self, question,\n column_ids, row_ids,\n table,\n features):\n \"\"\"Adds numeric relation emebeddings to 'features'.\n\n Args:\n question: The question, numeric values are used.\n column_ids: Maps word piece position to column id.\n row_ids: Maps word piece position to row id.\n table: The table containing the numeric cell values.\n features: Output.\n \"\"\"\n\n numeric_relations = [0 for _ in column_ids]\n\n # Create a dictionary that maps a table cell to the set of all relations\n # this cell has with any value in the question.\n cell_indices_to_relations = collections.defaultdict(set)\n if question is not None and table is not None:\n for numeric_value_span in question.annotations.spans:\n for value in numeric_value_span.values:\n for column_index in range(len(table.columns)):\n table_numeric_values = self._get_column_values(table, column_index)\n sort_key_fn = self._get_numeric_sort_key_fn(table_numeric_values,\n value)\n if sort_key_fn is None:\n continue\n for row_index, cell_value in table_numeric_values.items():\n relation = number_annotation_utils.get_numeric_relation(\n value, cell_value, sort_key_fn)\n if relation is not None:\n cell_indices_to_relations[column_index, row_index].add(relation)\n\n # For each cell add a special feature for all its word pieces.\n for (column_index,\n row_index), relations in cell_indices_to_relations.items():\n relation_set_index = 0\n for relation in relations:\n assert relation.value >= constants.Relation.EQ.value\n relation_set_index += 2**(relation.value - constants.Relation.EQ.value)\n beam_metrics.Metrics.counter(\n _NS, 'Relation Set Index: %d' % relation_set_index).inc()\n for cell_token_index in _get_cell_token_indexes(column_ids, row_ids,\n column_index, row_index):\n numeric_relations[cell_token_index] = relation_set_index\n\n features['numeric_relations'] = create_int_feature(numeric_relations)\n\n def _add_numeric_values(self, table,\n token_ids_dict,\n features):\n \"\"\"Adds numeric values for computation of answer loss.\"\"\"\n numeric_values = [_NAN] * self._max_seq_length\n if table:\n for col_index in range(len(table.columns)):\n for row_index in range(len(table.rows)):\n\n numeric_value = table.rows[row_index].cells[col_index].numeric_value\n if not numeric_value.HasField('float_value'):\n continue\n\n float_value = numeric_value.float_value\n if float_value == float('inf'):\n beam_metrics.Metrics.counter(\n _NS, 'cell with numeric value of infinite').inc()\n continue\n\n for index in 
_get_cell_token_indexes(token_ids_dict['column_ids'],\n token_ids_dict['row_ids'],\n col_index, row_index):\n numeric_values[index] = float_value\n features['numeric_values'] = create_float_feature(numeric_values)\n\n def _add_numeric_values_scale(self, table, token_ids_dict, features):\n \"\"\"Adds a scale to each token to down weigh the value of long words.\"\"\"\n numeric_values_scale = [1.0] * self._max_seq_length\n if not table:\n return numeric_values_scale\n for col_index in range(len(table.columns)):\n for row_index in range(len(table.rows)):\n indices = [\n index for index in _get_cell_token_indexes(\n token_ids_dict['column_ids'], token_ids_dict['row_ids'],\n col_index, row_index)\n ]\n num_indices = len(indices)\n if num_indices > 1:\n for index in indices:\n numeric_values_scale[index] = float(num_indices)\n features['numeric_values_scale'] = create_float_feature(\n numeric_values_scale)\n\n def _pad_to_seq_length(self, inputs):\n while len(inputs) > self._max_seq_length:\n inputs.pop()\n while len(inputs) < self._max_seq_length:\n inputs.append(0)\n\n def _to_token_ids(self, tokens):\n return self._tokenizer.convert_tokens_to_ids(_get_pieces(tokens))\n\n def _to_features(\n self, tokens, token_ids_dict,\n table,\n question):\n \"\"\"Produces a dict of TF features.\"\"\"\n tokens = list(tokens)\n token_ids_dict = {\n key: list(values) for key, values in token_ids_dict.items()\n }\n\n length = len(tokens)\n for values in token_ids_dict.values():\n if len(values) != length:\n raise ValueError('Inconsistent length')\n\n input_ids = self._to_token_ids(tokens)\n input_mask = [1] * len(input_ids)\n\n self._pad_to_seq_length(input_ids)\n self._pad_to_seq_length(input_mask)\n for values in token_ids_dict.values():\n self._pad_to_seq_length(values)\n\n assert len(input_ids) == self._max_seq_length\n assert len(input_mask) == self._max_seq_length\n for values in token_ids_dict.values():\n assert len(values) == self._max_seq_length\n\n features = collections.OrderedDict()\n features['input_ids'] = create_int_feature(input_ids)\n features['input_mask'] = create_int_feature(input_mask)\n for key, values in sorted(token_ids_dict.items()):\n features[key] = create_int_feature(values)\n\n self._add_numeric_column_ranks(token_ids_dict['column_ids'],\n token_ids_dict['row_ids'], table, features)\n\n self._add_numeric_relations(question, token_ids_dict['column_ids'],\n token_ids_dict['row_ids'], table, features)\n\n self._add_numeric_values(table, token_ids_dict, features)\n\n self._add_numeric_values_scale(table, token_ids_dict, features)\n\n if table:\n features['table_id'] = create_string_feature(\n [table.table_id.encode('utf8')])\n features['table_id_hash'] = create_int_feature(\n [fingerprint(table.table_id) % _MAX_INT])\n return features\n\n\nclass ToPretrainingTensorflowExample(ToTensorflowExampleBase):\n \"\"\"Class for converting pretraining examples.\"\"\"\n\n def __init__(self, config):\n super(ToPretrainingTensorflowExample, self).__init__(config)\n self._max_predictions_per_seq = config.max_predictions_per_seq\n self._masked_lm_prob = config.masked_lm_prob\n self._min_question_length = config.min_question_length\n self._max_question_length = config.max_question_length\n self._concatenate_snippets = config.concatenate_snippets\n self._always_continue_cells = config.always_continue_cells\n self._question_buckets = [\n self._min_question_length,\n (self._min_question_length + self._max_question_length) / 2,\n self._max_question_length\n ]\n self._vocab_words = 
list(self._tokenizer.get_vocab())\n\n def _to_example(self, table,\n instance):\n \"\"\"Creates TF example from TrainingInstance.\"\"\"\n\n features = self._to_features(\n instance.tokens, {\n 'column_ids': instance.column_ids,\n 'prev_label_ids': [0] * len(instance.tokens),\n 'row_ids': instance.row_ids,\n 'segment_ids': instance.segment_ids,\n },\n table=table,\n question=None)\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = self._tokenizer.convert_tokens_to_ids(\n instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n while len(masked_lm_positions) < self._max_predictions_per_seq:\n masked_lm_positions.append(0)\n masked_lm_ids.append(0)\n masked_lm_weights.append(0.0)\n\n is_random_table = 1 if instance.is_random_table else 0\n\n features['masked_lm_positions'] = create_int_feature(masked_lm_positions)\n features['masked_lm_ids'] = create_int_feature(masked_lm_ids)\n features['masked_lm_weights'] = create_float_feature(masked_lm_weights)\n features['next_sentence_labels'] = create_int_feature([is_random_table])\n features['is_random_table'] = create_int_feature([is_random_table])\n\n return tf.train.Example(features=tf.train.Features(feature=features))\n\n def convert(\n self,\n rng,\n interaction,\n random_table,\n ):\n \"\"\"Creates TF example from interaction.\"\"\"\n question_tokens = self._get_question_tokens(interaction, rng)\n\n if random_table is not None and rng.random() < 0.5:\n is_random_table = True\n table = random_table\n else:\n is_random_table = False\n if interaction.HasField('table'):\n table = interaction.table\n else:\n table = None\n\n if table is None:\n beam_metrics.Metrics.counter(_NS, 'Examples without tables').inc()\n question_tokens = self._tokenizer.tokenize(\n interaction.questions[0].original_text)\n question_tokens = question_tokens[:self._max_seq_length - 1]\n tokens, segment_ids, column_ids, row_ids = self._serialize_text(\n question_tokens)\n else:\n if (not question_tokens or\n len(question_tokens) < self._min_question_length):\n beam_metrics.Metrics.counter(\n _NS,\n f'Remove question below the min length {self._min_question_length}'\n ).inc()\n return None\n beam_metrics.Metrics.counter(_NS, 'Examples with tables').inc()\n beam_metrics.Metrics.counter(\n _NS,\n _get_buckets(\n len(question_tokens), self._question_buckets,\n 'Question Length')).inc()\n if random_table is not None:\n logging.log_every_n(logging.INFO,\n 'Table: %s Random Table: %s is_random_table: %s',\n 500000, interaction.table.table_id,\n random_table.table_id, is_random_table)\n\n token_budget = self._get_token_budget(question_tokens)\n tokenized_table = self._tokenize_table(table)\n try:\n num_columns, num_rows, num_tokens = self._get_table_sizes(\n token_budget, tokenized_table, rng)\n except ValueError:\n return None\n\n serialized_example = self._serialize(question_tokens, tokenized_table,\n num_columns, num_rows, num_tokens)\n tokens = serialized_example.tokens\n segment_ids = serialized_example.segment_ids\n row_ids = serialized_example.row_ids\n column_ids = serialized_example.column_ids\n\n assert len(tokens) <= self._max_seq_length\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = self._create_masked_lm_predictions(\n interaction, tokens, column_ids, row_ids, rng)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n column_ids=column_ids,\n row_ids=row_ids,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels,\n is_random_table=is_random_table)\n return 
self._to_example(table, instance)\n\n def _create_masked_lm_predictions(\n self, interaction, tokens,\n column_ids, row_ids,\n rng):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token.piece in [_CLS, _SEP]:\n continue\n column_id = column_ids[i]\n is_cell_continutation = column_id > 0 and column_id == column_ids[i - 1]\n if not self._always_continue_cells:\n is_cell_continutation = False\n if cand_indexes and (_is_inner_wordpiece(token) or is_cell_continutation):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n rng.shuffle(cand_indexes)\n\n output_tokens = list(tokens)\n\n num_to_predict = min(self._max_predictions_per_seq,\n max(1, int(round(len(tokens) * self._masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n\n for index in index_set:\n assert index not in covered_indexes\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = _MASK\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index].piece\n # 10% of the time, replace with random word\n else:\n masked_token = rng.choice(self._vocab_words)\n\n output_tokens[index] = Token(tokens[index].original_text, masked_token)\n\n masked_lms.append(\n MaskedLmInstance(index=index, label=tokens[index].piece))\n assert len(masked_lms) <= num_to_predict\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)\n\n def _get_question_tokens(self, interaction,\n rng):\n \"\"\"Randomly gets a snippet of relevant text.\"\"\"\n questions = [q.text for q in interaction.questions]\n rng.shuffle(questions)\n if not self._concatenate_snippets:\n # Find the first snippet that satisfies the requirements.\n for question in questions:\n tokens = self._tokenizer.tokenize(question)\n if len(tokens) > self._max_question_length:\n continue\n if len(tokens) < self._min_question_length:\n continue\n return tokens\n return None\n tokens = []\n for question in questions:\n tokens += self._tokenizer.tokenize(question)\n\n if len(tokens) < self._min_question_length:\n return None\n\n max_start_index = len(tokens) - self._min_question_length\n start_index = rng.randint(0, max_start_index)\n while start_index >= 0 and _is_inner_wordpiece(tokens[start_index]):\n start_index -= 1\n\n min_end_index = start_index + self._min_question_length\n max_end_index = min(len(tokens), self._max_question_length + start_index)\n assert min_end_index <= max_end_index\n end_index = rng.randint(min_end_index, max_end_index)\n assert (self._min_question_length <= end_index - start_index <=\n self._max_question_length)\n while end_index < len(tokens) and _is_inner_wordpiece(tokens[end_index]):\n end_index += 1\n\n return tokens[start_index:end_index]\n\n def _get_table_sizes(self, token_budget, table,\n rng):\n \"\"\"Computes column, row and token count for table.\"\"\"\n num_columns = 1\n num_rows = 1\n num_tokens = 1\n table_cost = self._get_table_cost(table, num_columns, num_rows, 
num_tokens)\n if table_cost > token_budget:\n raise ValueError('Cannot create table that fits budget')\n\n max_num_rows, max_num_columns, max_num_tokens = self._get_table_boundaries(\n table)\n\n while (num_columns < max_num_columns or num_rows < max_num_rows or\n num_tokens < max_num_tokens):\n if num_columns < max_num_columns and rng.random() < 0.5:\n cost = self._get_table_cost(table, num_columns + 1, num_rows,\n num_tokens)\n if cost > token_budget:\n break\n num_columns += 1\n if num_rows < max_num_rows and rng.random() < 0.5:\n cost = self._get_table_cost(table, num_columns, num_rows + 1,\n num_tokens)\n if cost > token_budget:\n break\n num_rows += 1\n if num_tokens < max_num_tokens and rng.random() < 0.5:\n cost = self._get_table_cost(table, num_columns, num_rows,\n num_tokens + 1)\n if cost > token_budget:\n break\n num_tokens += 1\n\n buckets = [8, 16, 32, 64, 128, 256]\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(num_columns * num_rows, buckets,\n 'Trimmed Table Size')).inc()\n\n # First row is the header.\n real_num_columns = len(table.rows[0]) if table.rows else 0\n # We don't count the header row.\n real_num_rows = len(table.rows) - 1\n\n beam_metrics.Metrics.counter(\n _NS,\n _get_buckets(real_num_columns * real_num_rows, buckets,\n 'Real Table Size')).inc()\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(num_columns, buckets, 'Column Sizes')).inc()\n beam_metrics.Metrics.counter(_NS,\n _get_buckets(num_rows, buckets,\n 'Row Sizes')).inc()\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(num_tokens, buckets, 'Table Token Sizes')).inc()\n\n return num_columns, num_rows, num_tokens\n\n\nclass ToTrimmedTensorflowExample(ToTensorflowExampleBase):\n \"\"\"Helper that allows squeezing a table into the max seq length.\"\"\"\n\n def __init__(self, config):\n super(ToTrimmedTensorflowExample, self).__init__(config)\n self._cell_trim_length = config.cell_trim_length\n\n def _get_num_columns(self, table):\n num_columns = len(table.columns)\n if num_columns >= self._max_column_id:\n raise ValueError('Too many columns')\n return num_columns\n\n def _get_num_rows(self, table,\n drop_rows_to_fit):\n num_rows = len(table.rows)\n if num_rows >= self._max_row_id:\n if drop_rows_to_fit:\n num_rows = self._max_row_id - 1\n else:\n raise ValueError('Too many rows')\n return num_rows\n\n def _to_trimmed_features(\n self,\n question,\n table,\n question_tokens,\n tokenized_table,\n num_columns,\n num_rows,\n drop_rows_to_fit = False,\n ):\n \"\"\"Finds optiomal number of table tokens to include and serializes.\"\"\"\n init_num_rows = num_rows\n while True:\n num_tokens = self._get_max_num_tokens(\n question_tokens,\n tokenized_table,\n num_rows=num_rows,\n num_columns=num_columns,\n )\n if num_tokens is not None:\n # We could fit the table.\n break\n if not drop_rows_to_fit or num_rows == 0:\n raise ValueError('Sequence too long')\n # Try to drop a row to fit the table.\n num_rows -= 1\n if init_num_rows != num_rows:\n beam_metrics.Metrics.counter(_NS, 'Tables with trimmed rows').inc()\n serialized_example = self._serialize(question_tokens, tokenized_table,\n num_columns, num_rows, num_tokens)\n\n assert len(serialized_example.tokens) <= self._max_seq_length\n\n feature_dict = {\n 'column_ids': serialized_example.column_ids,\n 'row_ids': serialized_example.row_ids,\n 'segment_ids': serialized_example.segment_ids,\n }\n features = self._to_features(\n serialized_example.tokens, feature_dict, table=table, question=question)\n return serialized_example, features\n\n def 
_get_max_num_tokens(\n self,\n question_tokens,\n tokenized_table,\n num_columns,\n num_rows,\n ):\n \"\"\"Computes max number of tokens that can be squeezed into the budget.\"\"\"\n token_budget = self._get_token_budget(question_tokens)\n _, _, max_num_tokens = self._get_table_boundaries(tokenized_table)\n if self._cell_trim_length >= 0 and max_num_tokens > self._cell_trim_length:\n max_num_tokens = self._cell_trim_length\n num_tokens = 0\n for num_tokens in range(max_num_tokens + 1):\n cost = self._get_table_cost(tokenized_table, num_columns, num_rows,\n num_tokens + 1)\n if cost > token_budget:\n break\n if num_tokens < max_num_tokens:\n if self._cell_trim_length >= 0:\n # We don't allow dynamic trimming if a cell_trim_length is set.\n return None\n if num_tokens == 0:\n return None\n beam_metrics.Metrics.counter(_NS, 'Tables with trimmed cells').inc()\n return num_tokens\n\n\nclass ToClassifierTensorflowExample(ToTrimmedTensorflowExample):\n \"\"\"Class for converting finetuning examples.\"\"\"\n\n def __init__(self, config):\n super(ToClassifierTensorflowExample, self).__init__(config)\n self._add_aggregation_candidates = config.add_aggregation_candidates\n self._use_document_title = config.use_document_title\n self._use_context_title = config.use_context_title\n self._update_answer_coordinates = config.update_answer_coordinates\n self._drop_rows_to_fit = config.drop_rows_to_fit\n self._trim_question_ids = config.trim_question_ids\n self._expand_entity_descriptions = config.expand_entity_descriptions\n self._use_entity_title = config.use_entity_title\n self._entity_descriptions_sentence_limit = config.entity_descriptions_sentence_limit\n \n # TODO (Chia-Chun)\n self._is_multi_hop = config.is_multi_hop\n self._use_bridge_entity = config.use_bridge_entity\n self._use_question_type = config.use_question_type\n\n def _tokenize_extended_question(\n self,\n question,\n table,\n ):\n \"\"\"Runs tokenizer over the question text and document title if it's used.\"\"\"\n # (Chia-Chun): \n text_tokens = []\n if self._use_question_type:\n question_type_tokens = self._tokenizer.tokenize(question.question_type)\n text_tokens.extend(question_type_tokens)\n text_tokens.append(Token(_SEP, _SEP))\n if self._is_multi_hop:\n hop_tokens = self._tokenizer.tokenize(\"Hop is \" + str(question.hop))\n text_tokens.extend(hop_tokens)\n text_tokens.append(Token(_SEP, _SEP))\n\n if self._use_bridge_entity:\n for bridge_entity in question.bridge_entities:\n bridge_entity_tokens = self._tokenizer.tokenize(bridge_entity)\n text_tokens.extend(bridge_entity_tokens)\n text_tokens.append(Token(_SEP, _SEP))\n\n \n\n\n question_tokens = self._tokenizer.tokenize(question.text)\n text_tokens.extend(list(question_tokens))\n #import pdb;pdb.set_trace()\n if self._use_document_title and table.document_title:\n # TODO(thomasmueller) Consider adding a different segment id.\n document_title_tokens = self._tokenizer.tokenize(table.document_title)\n text_tokens.append(Token(_SEP, _SEP))\n text_tokens.extend(document_title_tokens)\n context_heading = table.context_heading\n if self._use_context_title and context_heading:\n context_title_tokens = self._tokenizer.tokenize(context_heading)\n text_tokens.append(Token(_SEP, _SEP))\n text_tokens.extend(context_title_tokens)\n \n\n\n\n return text_tokens\n\n def convert(self, interaction,\n index):\n \"\"\"Converts question at 'index' to example.\"\"\"\n table = interaction.table\n\n num_rows = self._get_num_rows(table, self._drop_rows_to_fit)\n num_columns = 
self._get_num_columns(table)\n\n question = interaction.questions[index]\n #debug, disable here\n if not interaction.questions[index].answer.is_valid:\n beam_metrics.Metrics.counter(\n _NS, 'Conversion skipped (answer not valid)').inc()\n raise ValueError('Invalid answer')\n\n annotation_descriptions_ext = (\n annotated_text_pb2.AnnotationDescription.annotation_descriptions_ext)\n if (self._expand_entity_descriptions and\n annotation_descriptions_ext in interaction.Extensions):\n descriptions = interaction.Extensions[\n annotation_descriptions_ext].descriptions\n _add_entity_descriptions_to_table(\n question,\n descriptions,\n table,\n use_entity_title=self._use_entity_title,\n num_results=self._entity_descriptions_sentence_limit)\n\n text_tokens = self._tokenize_extended_question(question, table)\n tokenized_table = self._tokenize_table(table)\n table_selection_ext = table_selection_pb2.TableSelection.table_selection_ext\n if table_selection_ext in question.Extensions:\n table_selection = question.Extensions[table_selection_ext]\n if not tokenized_table.selected_tokens:\n raise ValueError('No tokens selected')\n if table_selection.selected_tokens:\n selected_tokens = {(t.row_index, t.column_index, t.token_index)\n for t in table_selection.selected_tokens}\n tokenized_table.selected_tokens = [\n t for t in tokenized_table.selected_tokens\n if (t.row_index, t.column_index, t.token_index) in selected_tokens\n ]\n\n serialized_example, features = self._to_trimmed_features(\n question=question,\n table=table,\n question_tokens=text_tokens,\n tokenized_table=tokenized_table,\n num_columns=num_columns,\n num_rows=num_rows,\n drop_rows_to_fit=self._drop_rows_to_fit)\n\n column_ids = serialized_example.column_ids\n row_ids = serialized_example.row_ids\n\n def get_answer_ids(question):\n if self._update_answer_coordinates:\n return _find_answer_ids_from_answer_texts(\n column_ids,\n row_ids,\n tokenized_table,\n answer_texts=[\n self._tokenizer.tokenize(at)\n for at in question.answer.answer_texts\n ],\n )\n return _get_answer_ids(column_ids, row_ids, question)\n\n answer_ids = get_answer_ids(question)\n self._pad_to_seq_length(answer_ids)\n features['label_ids'] = create_int_feature(answer_ids)\n\n if index > 0:\n prev_answer_ids = get_answer_ids(interaction.questions[index - 1],)\n else:\n prev_answer_ids = [0] * len(column_ids)\n self._pad_to_seq_length(prev_answer_ids)\n features['prev_label_ids'] = create_int_feature(prev_answer_ids)\n features['question_id'] = create_string_feature(\n [question.id.encode('utf8')])\n if self._trim_question_ids:\n question_id = question.id[-text_utils.DEFAULT_INTS_LENGTH:]\n else:\n question_id = question.id\n features['question_id_ints'] = create_int_feature(\n text_utils.str_to_ints(\n question_id, length=text_utils.DEFAULT_INTS_LENGTH))\n features['aggregation_function_id'] = create_int_feature(\n [question.answer.aggregation_function])\n features['classification_class_index'] = create_int_feature(\n [question.answer.class_index])\n\n answer = question.answer.float_value if question.answer.HasField(\n 'float_value') else _NAN\n features['answer'] = create_float_feature([answer])\n\n if self._add_aggregation_candidates:\n rng = random.Random(fingerprint(question.id))\n\n candidates = interpretation_utils.find_candidates(rng, table, question)\n num_initial_candidates = len(candidates)\n\n candidates = [c for c in candidates if len(c.rows) < _MAX_NUM_ROWS]\n candidates = candidates[:_MAX_NUM_CANDIDATES]\n\n funs = [0] * _MAX_NUM_CANDIDATES\n sizes = [0] * 
_MAX_NUM_CANDIDATES\n indexes = []\n\n num_final_candidates = 0\n for index, candidate in enumerate(candidates):\n token_indexes = []\n for row in candidate.rows:\n token_indexes += _get_cell_token_indexes(column_ids, row_ids,\n candidate.column, row)\n if len(indexes) + len(serialized_example.tokens) > _MAX_INDEX_LENGTH:\n break\n num_final_candidates += 1\n sizes[index] = len(token_indexes)\n funs[index] = candidate.agg_function\n indexes += token_indexes\n\n # <int>[1]\n features['cand_num'] = create_int_feature([num_final_candidates])\n # <int>[_MAX_NUM_CANDIDATES]\n features['can_aggregation_function_ids'] = create_int_feature(funs)\n # <int>[_MAX_NUM_CANDIDATES]\n features['can_sizes'] = create_int_feature(sizes)\n # <int>[_MAX_INDEX_LENGTH]\n # Actual length is sum(sizes).\n features['can_indexes'] = create_int_feature(indexes)\n\n if num_initial_candidates > 0:\n beam_metrics.Metrics.counter(\n _NS,\n _get_buckets(num_initial_candidates,\n [10, 20, 50, 100, 200, 500, 1000, 1200, 1500],\n 'Candidates Size:')).inc()\n\n beam_metrics.Metrics.counter(_NS, 'Candidates: Input').inc()\n if num_final_candidates != num_initial_candidates:\n beam_metrics.Metrics.counter(_NS,\n 'Candidates: Dropped candidates').inc()\n\n return tf.train.Example(features=tf.train.Features(feature=features))\n\n def get_empty_example(self):\n interaction = interaction_pb2.Interaction(questions=[\n interaction_pb2.Question(id=text_utils.get_padded_question_id())\n ])\n return self.convert(interaction, index=0)\n",
"# coding=utf-8\n# Copyright 2019 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python3\n\"\"\"Impl. of a simple TF-IDF baseline for table retrieval.\"\"\"\n\nimport collections\nimport math\nfrom typing import Text, Iterable, List, Tuple, Mapping, Optional, Callable\n\nfrom absl import logging\nimport dataclasses\nfrom gensim.summarization import bm25\nfrom tapas.protos import interaction_pb2\nfrom tapas.utils import text_utils\nimport tensorflow.compat.v1 as tf\nimport tqdm\n\nimport nltk\nfrom nltk.corpus import stopwords\nnltk.download('stopwords')\nstop_words = set(stopwords.words('english'))\n\ndef iterate_tables(table_file):\n for value in tf.python_io.tf_record_iterator(table_file):\n table = interaction_pb2.Table()\n table.ParseFromString(value)\n yield table\n\n\ndef iterate_interaction_tables(\n interaction_file):\n for value in tf.python_io.tf_record_iterator(interaction_file):\n interaction = interaction_pb2.Interaction()\n interaction.ParseFromString(value)\n yield interaction.table\n\n\ndef _iterate_table_texts(table,\n title_multiplicator):\n # repeat title and header multiple times\n for _ in range(title_multiplicator):\n if table.document_title:\n yield table.document_title\n for column in table.columns:\n yield column.text\n # ---------- debug starts ----------\n # comment out to see retrieval performance no body text\n for row in table.rows:\n for cell in row.cells:\n yield cell.text\n # ---------- debug ends ----------\n\n# --------------- custom starts -----------------\ndef _iterate_weighted_table_texts(\n table,weight_title,weight_header,weight_content,\n weight_sec_title,weight_caption,weight_abbv,\n filter_abbv=False): #, filter_abbv_func: Callable=None):\n # repeat title and header multiple times\n if weight_abbv>0:\n table_texts = set(_tokenize(' '.join([\n table.document_title,\n table.section_title,\n table.caption,\n ' '.join([column.text for column in table.columns]),\n ' '.join([cell.text for row in table.rows for cell in row.cells])\n ])))\n else:\n table_texts = ''\n for _ in range(weight_title):\n if table.document_title:\n yield table.document_title\n for _ in range(weight_sec_title):\n if table.section_title:\n yield table.section_title\n for _ in range(weight_caption):\n if table.caption:\n yield table.caption\n for _ in range(weight_header):\n for column in table.columns:\n yield column.text\n for _ in range(weight_content):\n for row in table.rows:\n for cell in row.cells:\n yield cell.text\n for _ in range(weight_abbv):\n # if filter_abbv_func is not None:\n # yield ' '.join(filter_abbv_func(table))\n\n # TODO: refactor, move this into filter_abbv_func api \n if table.abbvs:\n for abbv in table.abbvs:\n if filter_abbv:\n s1 = set(_tokenize(abbv.abbreviation)) & table_texts\n s2 = set(_tokenize(abbv.expansion))\n if (len(s1)>0 or s2.issubset(table_texts)):\n yield abbv.abbreviation.lower()\n else:\n yield f\"{abbv.abbreviation.lower()}\"#\" is {abbv.expansion}\"\n \n\ndef 
_iterate_custom_tokenized_table_texts(table,\n title_multiplicator, \n weight_header,\n weight_content,\n weight_sec_title,\n weight_caption,\n weight_abbv,\n cased=False,\n filter_abbv=False\n ):\n for text in _iterate_weighted_table_texts(\n table, title_multiplicator, \n weight_header, weight_content, weight_sec_title,\n weight_caption, weight_abbv, filter_abbv=filter_abbv\n ):\n yield from _tokenize(text, cased=cased) #try not lower????\n\n# ---------------- custom ends -------------------\n\ndef _iterate_tokenized_table_texts(table,\n title_multiplicator):\n for text in _iterate_table_texts(table, title_multiplicator):\n yield from _tokenize(text)\n\n\ndef _tokenize(text, cased=False):\n return text_utils.tokenize_text(text_utils.format_text(text,cased))\n\n\[email protected](frozen=True)\nclass TableFrequency:\n table_index: int\n score: float\n\n\[email protected](frozen=True)\nclass IndexEntry:\n table_counts: List[TableFrequency]\n\n\nclass InvertedIndex:\n \"\"\"Inverted Index implementation.\"\"\"\n\n def __init__(self, table_ids, index):\n self.table_ids_ = table_ids\n self._table_ids = table_ids\n self._tid2idx = {table_id: i for i, table_id in enumerate(table_ids)}\n self.index_ = index\n\n def retrieve(self, question,cased=False, sort_by_score=True):\n \"\"\"Retrieves tables sorted by descending score.\"\"\"\n if cased:\n raise NotImplementedError('cased query not implemented')\n hits = collections.defaultdict(list)\n\n num_tokens = 0\n for token in _tokenize(question):\n num_tokens += 1\n index_entry = self.index_.get(token, None)\n if index_entry is None:\n continue\n\n for table_count in index_entry.table_counts:\n scores = hits[table_count.table_index]\n scores.append(table_count.score)\n\n scored_hits = []\n for table_index, inv_document_freqs in hits.items():\n score = sum(inv_document_freqs) / num_tokens\n scored_hits.append((self.table_ids_[table_index], score))\n if sort_by_score:\n scored_hits.sort(key=lambda name_score: name_score[1], reverse=True)\n return scored_hits\n\n\ndef _remove_duplicates(\n tables):\n table_id_set = set()\n for table in tables:\n if table.table_id in table_id_set:\n logging.info('Duplicate table ids: %s', table.table_id)\n continue\n table_id_set.add(table.table_id)\n yield table\n\n# ----------- custom starts ------------\ndef create_uneven_inverted_index(\n tables,\n title_multiplicator=1,\n weight_sec_title = 1,\n weight_caption = 0,\n weight_header=0,\n weight_content=0,\n weight_abbv=0,\n cased=False,\n filter_abbv=False,\n min_rank=0,\n drop_term_frequency=True\n ):\n table_ids = []\n token_to_info = collections.defaultdict(lambda: collections.defaultdict(int))\n for table in tqdm.tqdm(_remove_duplicates(tables)):\n table_index = len(table_ids)\n table_ids.append(table.table_id)\n for token in _iterate_custom_tokenized_table_texts(\n table, \n title_multiplicator=title_multiplicator, \n weight_header=weight_header, \n weight_content=weight_content,\n weight_sec_title=weight_sec_title,\n weight_caption=weight_caption,\n weight_abbv=weight_abbv,\n cased=cased,\n filter_abbv=filter_abbv\n ):\n token_to_info[token][table_index] += 1\n\n logging.info('Table Ids: %d', len(table_ids))\n logging.info('Num types: %d', len(token_to_info))\n\n def count_fn(table_counts):\n return sum(table_counts.values())\n\n token_to_info = list(token_to_info.items())\n token_to_info.sort(\n key=lambda token_info: count_fn(token_info[1]), reverse=True)\n\n index = {}\n for freq_rank, (token, table_counts) in enumerate(token_to_info):\n df = 
count_fn(table_counts)\n if freq_rank < min_rank:\n logging.info(\n 'Filter \"%s\" for index (%d, rank: %d).', token, df, freq_rank)\n continue\n idf = 1.0 / (math.log(df, 2) + 1)\n counts = []\n for table, count in table_counts.items():\n if drop_term_frequency:\n count = 1.0\n counts.append(TableFrequency(table, idf * count))\n index[token] = IndexEntry(counts)\n\n return InvertedIndex(table_ids, index)\n# ----------- custom ends -----------\n\n\ndef create_inverted_index(tables,\n title_multiplicator=1,\n min_rank = 0,\n drop_term_frequency = True):\n \"\"\"Creates an index for some tables.\n\n Args:\n tables: Tables to index\n title_multiplicator: Emphasize words in title or header.\n min_rank: Word types with a frequency rank lower than this will be ignored.\n Can be useful to remove stop words.\n drop_term_frequency: Don't consider term frequency.\n\n Returns:\n the inverted index.\n \"\"\"\n table_ids = []\n token_to_info = collections.defaultdict(lambda: collections.defaultdict(int))\n for table in _remove_duplicates(tables):\n table_index = len(table_ids)\n table_ids.append(table.table_id)\n for token in _iterate_tokenized_table_texts(table, title_multiplicator):\n token_to_info[token][table_index] += 1\n\n logging.info('Table Ids: %d', len(table_ids))\n logging.info('Num types: %d', len(token_to_info))\n\n def count_fn(table_counts):\n return sum(table_counts.values())\n\n token_to_info = list(token_to_info.items())\n token_to_info.sort(\n key=lambda token_info: count_fn(token_info[1]), reverse=True)\n\n index = {}\n for freq_rank, (token, table_counts) in enumerate(token_to_info):\n df = count_fn(table_counts)\n if freq_rank < min_rank:\n logging.info(\n 'Filter \"%s\" for index (%d, rank: %d).', token, df, freq_rank)\n continue\n idf = 1.0 / (math.log(df, 2) + 1)\n counts = []\n for table, count in table_counts.items():\n if drop_term_frequency:\n count = 1.0\n counts.append(TableFrequency(table, idf * count))\n index[token] = IndexEntry(counts)\n\n return InvertedIndex(table_ids, index)\n\n\nclass BM25Index:\n \"\"\"Index based on gensim BM25.\"\"\"\n\n def __init__(self, corpus, table_ids, k1=1.5, b=0.75):\n self._table_ids = table_ids\n self._model = bm25.BM25(corpus,k1=k1,b=b)\n self._tid2idx = {table_id: i for i, table_id in enumerate(table_ids)}\n\n def retrieve(self, question,cased=False, \n use_oracle_abbv=False, oracle_abbv_map=None, filter_stop_words=False,sort_by_score=True):\n q_tokens = _tokenize(question,cased)\n # TODO This is slow maybe we can improve efficiency.\n \n #-------------refactor this part-----------------\n expanded_q_tokens = []\n if use_oracle_abbv:\n for qtok in q_tokens:\n expanded_q_tokens.append(qtok)\n if qtok in oracle_abbv_map:\n expanded_q_tokens.extend(\n _tokenize(oracle_abbv_map[qtok],cased)\n )\n if len(expanded_q_tokens) < len(q_tokens):\n raise ValueError('Expanded query tokens are less than original query tokens')\n \n q_tokens = expanded_q_tokens if expanded_q_tokens else q_tokens\n \n if filter_stop_words:\n q_tokens = [qtok for qtok in q_tokens if qtok not in stop_words]\n #-------------refactor above---------------------\n \n scores = self._model.get_scores(q_tokens)\n if sort_by_score:\n table_scores = [(self._table_ids[index], score)\n for index, score in enumerate(scores)\n if score > 0.0]\n \n table_scores.sort(key=lambda table_score: table_score[1], reverse=True)\n else:\n table_scores = [(self._table_ids[index], score)\n for index, score in enumerate(scores)]\n return table_scores\n\n\ndef create_bm25_index(\n tables,\n 
title_multiplicator=1,\n num_tables = None,\n):\n \"\"\"Creates a new index.\"\"\"\n corpus = []\n table_ids = []\n for table in tqdm.tqdm(_remove_duplicates(tables), total=num_tables):\n corpus.append(\n list(_iterate_tokenized_table_texts(table, title_multiplicator)))\n table_ids.append(table.table_id)\n return BM25Index(corpus, table_ids)\n\n# --------------- custom starts -----------------\ndef create_uneven_bm25_index(\n tables,\n title_multiplicator=1, # weight doc title\n num_tables = None,\n weight_header=1,\n weight_content=1,\n weight_sec_title=1,\n weight_caption=1,\n weight_abbv=1,\n cased = False,\n filter_abbv=False\n):\n \"\"\"Creates a new index.\"\"\"\n corpus = []\n table_ids = []\n for table in tqdm.tqdm(_remove_duplicates(tables), total=num_tables):\n corpus.append(\n list(_iterate_custom_tokenized_table_texts(\n table, \n title_multiplicator, \n weight_header,\n weight_content,\n weight_sec_title,\n weight_caption,\n weight_abbv,\n cased = cased,\n filter_abbv = filter_abbv\n )))\n table_ids.append(table.table_id)\n return BM25Index(corpus, table_ids)\n# --------------- custom ends -----------------"
] | [
[
"tensorflow.compat.v1.train.Features",
"tensorflow.compat.v1.io.gfile.GFile"
],
[
"tensorflow.compat.v1.python_io.tf_record_iterator"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
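The retrieval module quoted above scores a table by summing, over query tokens that hit the inverted index, an inverse-document-frequency-style weight idf = 1.0 / (log2(df) + 1), normalized by the query length (term frequency is dropped by default). A minimal, self-contained sketch of that scoring scheme follows; the toy string corpus and whitespace tokenizer are illustrative assumptions, whereas the real code tokenizes interaction_pb2.Table protos via text_utils:

import collections
import math

def build_index(docs):
    # token -> {doc_index: idf}; idf = 1.0 / (log2(df) + 1) as in
    # create_inverted_index above, with term frequency dropped.
    postings = collections.defaultdict(set)
    for doc_index, doc in enumerate(docs):
        for token in doc.lower().split():
            postings[token].add(doc_index)
    return {
        token: {doc_id: 1.0 / (math.log(len(doc_ids), 2) + 1)
                for doc_id in doc_ids}
        for token, doc_ids in postings.items()
    }

def retrieve(index, question):
    # Sum matched idf weights, normalize by query length, and sort by
    # descending score, mirroring InvertedIndex.retrieve above.
    tokens = question.lower().split()
    scores = collections.defaultdict(float)
    for token in tokens:
        for doc_id, idf in index.get(token, {}).items():
            scores[doc_id] += idf / max(len(tokens), 1)
    return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)

docs = ['world cup winners by year', 'tallest buildings by city']
print(retrieve(build_index(docs), 'world cup winners'))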
gdepalma93/bright-athlete-academy | [
"54ba0cc6633637c1bd6d90120153e04b981244bf",
"54ba0cc6633637c1bd6d90120153e04b981244bf",
"54ba0cc6633637c1bd6d90120153e04b981244bf",
"54ba0cc6633637c1bd6d90120153e04b981244bf"
] | [
"Resources/books/long_short_term_memory_networks_with_python/code/lesson_12/tune_batch_size.py",
"Resources/books/deep_learning_time_series_forecasting/code/chapter_14/03_cnn_forecast_model.py",
"Resources/books/deep_learning_time_series_forecasting/code/chapter_16/05_plots_of_daily_power_consumption.py",
"Resources/books/deep_learning_time_series_forecasting/code/chapter_24/04_cnn_tune_filter_maps.py"
] | [
"from keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom matplotlib import pyplot\nfrom pandas import DataFrame\nfrom numpy import array\n\n# return training data\ndef get_train():\n\tseq = [[0.0, 0.1], [0.1, 0.2], [0.2, 0.3], [0.3, 0.4], [0.4, 0.5]]\n\tseq = array(seq)\n\tX, y = seq[:, 0], seq[:, 1]\n\tX = X.reshape((5, 1, 1))\n\treturn X, y\n\n# return validation data\ndef get_val():\n\tseq = [[0.5, 0.6], [0.6, 0.7], [0.7, 0.8], [0.8, 0.9], [0.9, 1.0]]\n\tseq = array(seq)\n\tX, y = seq[:, 0], seq[:, 1]\n\tX = X.reshape((len(X), 1, 1))\n\treturn X, y\n\n# fit an LSTM model\ndef fit_model(n_batch):\n\t# define model\n\tmodel = Sequential()\n\tmodel.add(LSTM(10, input_shape=(1,1)))\n\tmodel.add(Dense(1, activation='linear'))\n\t# compile model\n\tmodel.compile(loss='mse', optimizer='adam')\n\t# fit model\n\tX,y = get_train()\n\tmodel.fit(X, y, epochs=500, shuffle=False, verbose=0, batch_size=n_batch)\n\t# evaluate model\n\tvalX, valY = get_val()\n\tloss = model.evaluate(valX, valY, verbose=0)\n\treturn loss\n\n# define scope of search\nparams = [1, 2, 3]\nn_repeats = 5\n# grid search parameter values\nscores = DataFrame()\nfor value in params:\n\t# repeat each experiment multiple times\n\tloss_values = list()\n\tfor i in range(n_repeats):\n\t\tloss = fit_model(value)\n\t\tloss_values.append(loss)\n\t\tprint('>%d/%d param=%f, loss=%f' % (i+1, n_repeats, value, loss))\n\t# store results for this parameter\n\tscores[str(value)] = loss_values\n# summary statistics of results\nprint(scores.describe())\n# box and whisker plot of results\nscores.boxplot()\npyplot.show()\n",
"# evaluate cnn for monthly car sales dataset\nfrom math import sqrt\nfrom numpy import array\nfrom numpy import mean\nfrom numpy import std\nfrom pandas import DataFrame\nfrom pandas import concat\nfrom pandas import read_csv\nfrom sklearn.metrics import mean_squared_error\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers.convolutional import Conv1D\nfrom keras.layers.convolutional import MaxPooling1D\nfrom matplotlib import pyplot\n\n# split a univariate dataset into train/test sets\ndef train_test_split(data, n_test):\n\treturn data[:-n_test], data[-n_test:]\n\n# transform list into supervised learning format\ndef series_to_supervised(data, n_in, n_out=1):\n\tdf = DataFrame(data)\n\tcols = list()\n\t# input sequence (t-n, ... t-1)\n\tfor i in range(n_in, 0, -1):\n\t\tcols.append(df.shift(i))\n\t# forecast sequence (t, t+1, ... t+n)\n\tfor i in range(0, n_out):\n\t\tcols.append(df.shift(-i))\n\t# put it all together\n\tagg = concat(cols, axis=1)\n\t# drop rows with NaN values\n\tagg.dropna(inplace=True)\n\treturn agg.values\n\n# root mean squared error or rmse\ndef measure_rmse(actual, predicted):\n\treturn sqrt(mean_squared_error(actual, predicted))\n\n# fit a model\ndef model_fit(train, config):\n\t# unpack config\n\tn_input, n_filters, n_kernel, n_epochs, n_batch = config\n\t# prepare data\n\tdata = series_to_supervised(train, n_input)\n\ttrain_x, train_y = data[:, :-1], data[:, -1]\n\ttrain_x = train_x.reshape((train_x.shape[0], train_x.shape[1], 1))\n\t# define model\n\tmodel = Sequential()\n\tmodel.add(Conv1D(n_filters, n_kernel, activation='relu', input_shape=(n_input, 1)))\n\tmodel.add(Conv1D(n_filters, n_kernel, activation='relu'))\n\tmodel.add(MaxPooling1D())\n\tmodel.add(Flatten())\n\tmodel.add(Dense(1))\n\tmodel.compile(loss='mse', optimizer='adam')\n\t# fit\n\tmodel.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0)\n\treturn model\n\n# forecast with a pre-fit model\ndef model_predict(model, history, config):\n\t# unpack config\n\tn_input, _, _, _, _ = config\n\t# prepare data\n\tx_input = array(history[-n_input:]).reshape((1, n_input, 1))\n\t# forecast\n\tyhat = model.predict(x_input, verbose=0)\n\treturn yhat[0]\n\n# walk-forward validation for univariate data\ndef walk_forward_validation(data, n_test, cfg):\n\tpredictions = list()\n\t# split dataset\n\ttrain, test = train_test_split(data, n_test)\n\t# fit model\n\tmodel = model_fit(train, cfg)\n\t# seed history with training dataset\n\thistory = [x for x in train]\n\t# step over each time-step in the test set\n\tfor i in range(len(test)):\n\t\t# fit model and make forecast for history\n\t\tyhat = model_predict(model, history, cfg)\n\t\t# store forecast in list of predictions\n\t\tpredictions.append(yhat)\n\t\t# add actual observation to history for the next loop\n\t\thistory.append(test[i])\n\t# estimate prediction error\n\terror = measure_rmse(test, predictions)\n\tprint(' > %.3f' % error)\n\treturn error\n\n# repeat evaluation of a config\ndef repeat_evaluate(data, config, n_test, n_repeats=30):\n\t# fit and evaluate the model n times\n\tscores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)]\n\treturn scores\n\n# summarize model performance\ndef summarize_scores(name, scores):\n\t# print a summary\n\tscores_m, score_std = mean(scores), std(scores)\n\tprint('%s: %.3f RMSE (+/- %.3f)' % (name, scores_m, score_std))\n\t# box and whisker plot\n\tpyplot.boxplot(scores)\n\tpyplot.show()\n\nseries = 
read_csv('monthly-car-sales.csv', header=0, index_col=0)\ndata = series.values\n# data split\nn_test = 12\n# define config\nconfig = [36, 256, 3, 100, 100]\n# repeated evaluation of the fixed config\nscores = repeat_evaluate(data, config, n_test)\n# summarize scores\nsummarize_scores('cnn', scores)",
"# daily line plots for power usage dataset\nfrom pandas import read_csv\nfrom matplotlib import pyplot\n# load the new file\ndataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime'])\n# plot active power for each year\ndays = [x for x in range(1, 20)]\npyplot.figure()\nfor i in range(len(days)):\n\t# prepare subplot\n\tax = pyplot.subplot(len(days), 1, i+1)\n\t# determine the day to plot\n\tday = '2007-01-' + str(days[i])\n\t# get all observations for the day\n\tresult = dataset[day]\n\t# plot the active power for the day\n\tpyplot.plot(result['Global_active_power'])\n\t# add a title to the subplot\n\tpyplot.title(day, y=0, loc='left', size=6)\n\t# turn off ticks to remove clutter\n\tpyplot.yticks([])\n\tpyplot.xticks([])\npyplot.show()",
"# cnn model with filters for the har dataset\nfrom numpy import mean\nfrom numpy import std\nfrom numpy import dstack\nfrom pandas import read_csv\nfrom matplotlib import pyplot\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers import Dropout\nfrom keras.layers.convolutional import Conv1D\nfrom keras.layers.convolutional import MaxPooling1D\nfrom keras.utils import to_categorical\n\n# load a single file as a numpy array\ndef load_file(filepath):\n\tdataframe = read_csv(filepath, header=None, delim_whitespace=True)\n\treturn dataframe.values\n\n# load a list of files and return as a 3d numpy array\ndef load_group(filenames, prefix=''):\n\tloaded = list()\n\tfor name in filenames:\n\t\tdata = load_file(prefix + name)\n\t\tloaded.append(data)\n\t# stack group so that features are the 3rd dimension\n\tloaded = dstack(loaded)\n\treturn loaded\n\n# load a dataset group, such as train or test\ndef load_dataset_group(group, prefix=''):\n\tfilepath = prefix + group + '/Inertial Signals/'\n\t# load all 9 files as a single array\n\tfilenames = list()\n\t# total acceleration\n\tfilenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt']\n\t# body acceleration\n\tfilenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt']\n\t# body gyroscope\n\tfilenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt']\n\t# load input data\n\tX = load_group(filenames, filepath)\n\t# load class output\n\ty = load_file(prefix + group + '/y_'+group+'.txt')\n\treturn X, y\n\n# load the dataset, returns train and test X and y elements\ndef load_dataset(prefix=''):\n\t# load all train\n\ttrainX, trainy = load_dataset_group('train', prefix + 'HARDataset/')\n\t# load all test\n\ttestX, testy = load_dataset_group('test', prefix + 'HARDataset/')\n\t# zero-offset class values\n\ttrainy = trainy - 1\n\ttesty = testy - 1\n\t# one hot encode y\n\ttrainy = to_categorical(trainy)\n\ttesty = to_categorical(testy)\n\treturn trainX, trainy, testX, testy\n\n# fit and evaluate a model\ndef evaluate_model(trainX, trainy, testX, testy, n_filters):\n\tverbose, epochs, batch_size = 0, 10, 32\n\tn_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]\n\tmodel = Sequential()\n\tmodel.add(Conv1D(n_filters, 3, activation='relu', input_shape=(n_timesteps,n_features)))\n\tmodel.add(Conv1D(n_filters, 3, activation='relu'))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(MaxPooling1D())\n\tmodel.add(Flatten())\n\tmodel.add(Dense(100, activation='relu'))\n\tmodel.add(Dense(n_outputs, activation='softmax'))\n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\t# fit network\n\tmodel.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)\n\t# evaluate model\n\t_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)\n\treturn accuracy\n\n# summarize scores\ndef summarize_results(scores, params):\n\tprint(scores, params)\n\t# summarize mean and standard deviation\n\tfor i in range(len(scores)):\n\t\tm, s = mean(scores[i]), std(scores[i])\n\t\tprint('Param=%d: %.3f%% (+/-%.3f)' % (params[i], m, s))\n\t# boxplot of scores\n\tpyplot.boxplot(scores, labels=params)\n\tpyplot.savefig('exp_cnn_filters.png')\n\n# run an experiment\ndef run_experiment(params, repeats=10):\n\t# load data\n\ttrainX, trainy, testX, testy = load_dataset()\n\t# test each parameter\n\tall_scores 
= list()\n\tfor p in params:\n\t\t# repeat experiment\n\t\tscores = list()\n\t\tfor r in range(repeats):\n\t\t\tscore = evaluate_model(trainX, trainy, testX, testy, p)\n\t\t\tscore = score * 100.0\n\t\t\tprint('>p=%d #%d: %.3f' % (p, r+1, score))\n\t\t\tscores.append(score)\n\t\tall_scores.append(scores)\n\t# summarize results\n\tsummarize_results(all_scores, params)\n\n# run the experiment\nn_params = [8, 16, 32, 64, 128, 256]\nrun_experiment(n_params)"
] | [
[
"numpy.array",
"matplotlib.pyplot.show",
"pandas.DataFrame"
],
[
"matplotlib.pyplot.boxplot",
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame",
"sklearn.metrics.mean_squared_error",
"numpy.std",
"numpy.mean",
"numpy.array",
"matplotlib.pyplot.show"
],
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.boxplot",
"pandas.read_csv",
"matplotlib.pyplot.savefig",
"numpy.dstack",
"numpy.std",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
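The univariate forecasting script quoted above evaluates its CNN with a walk-forward validation loop: fit once on the training split, then step through the test set, forecasting the next value from the running history and appending the actual observation before the next step, with RMSE computed at the end. A minimal sketch of that loop, with a naive persistence forecast standing in for the Keras model (the persistence function is an illustrative assumption, not part of the original script):

from math import sqrt

def persistence_forecast(history):
    # Naive stand-in for model_predict: repeat the last observed value.
    return history[-1]

def walk_forward_validation(data, n_test):
    # Split as in train_test_split above, then seed history with train.
    train, test = data[:-n_test], data[-n_test:]
    history = list(train)
    predictions = []
    for actual in test:
        predictions.append(persistence_forecast(history))
        history.append(actual)  # the actual observation becomes available
    # Root mean squared error, as in measure_rmse above.
    return sqrt(sum((p - a) ** 2 for p, a in zip(predictions, test)) / n_test)

data = [10.0, 12.0, 13.0, 12.0, 15.0, 16.0, 18.0, 17.0, 19.0, 21.0]
print('RMSE: %.3f' % walk_forward_validation(data, n_test=3))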
GU-DataLab/fairness-and-missing-values | [
"4b9383d2e383ae49a0cd6c94e3c9cf7c3a584581",
"62ffe7c951d8a60d49a9ea6ac7b04aa4432a3fb7",
"36a900aa235d1d53bd57e11c89e3f73f9a585aca",
"065ac24e7f5ec124f6cfe39ce21f085f4c87a401",
"4b9383d2e383ae49a0cd6c94e3c9cf7c3a584581",
"4b9383d2e383ae49a0cd6c94e3c9cf7c3a584581",
"62ffe7c951d8a60d49a9ea6ac7b04aa4432a3fb7",
"065ac24e7f5ec124f6cfe39ce21f085f4c87a401",
"065ac24e7f5ec124f6cfe39ce21f085f4c87a401"
] | [
"env/lib/python3.7/site-packages/art/defences/preprocessor/variance_minimization.py",
"env/lib/python3.7/site-packages/art/attacks/poisoning/feature_collision_attack.py",
"models/tot_metrics.py",
"env/lib/python3.7/site-packages/tests/attacks/evasion/test_frame_saliency.py",
"env/lib/python3.7/site-packages/art/estimators/poison_mitigation/strip/strip.py",
"env/lib/python3.7/site-packages/art/attacks/inference/membership_inference/label_only_boundary_distance.py",
"env/lib/python3.7/site-packages/art/attacks/evasion/frame_saliency.py",
"env/lib/python3.7/site-packages/art/estimators/classification/GPy.py",
"env/lib/python3.7/site-packages/tests/defences/detector/poison/test_activation_defence.py"
] | [
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements the total variance minimization defence `TotalVarMin`.\n\n| Paper link: https://openreview.net/forum?id=SyJ7ClWCb\n\n| Please keep in mind the limitations of defences. For more information on the limitations of this defence,\n see https://arxiv.org/abs/1802.00420 . For details on how to evaluate classifier security in general, see\n https://arxiv.org/abs/1902.06705\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom typing import Optional, Tuple, TYPE_CHECKING\n\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom tqdm import tqdm\n\nfrom art.config import ART_NUMPY_DTYPE\nfrom art.defences.preprocessor.preprocessor import Preprocessor\n\nif TYPE_CHECKING:\n from art.utils import CLIP_VALUES_TYPE\n\nlogger = logging.getLogger(__name__)\n\n\nclass TotalVarMin(Preprocessor):\n \"\"\"\n Implement the total variance minimization defence approach.\n\n | Paper link: https://openreview.net/forum?id=SyJ7ClWCb\n\n | Please keep in mind the limitations of defences. For more information on the limitations of this\n defence, see https://arxiv.org/abs/1802.00420 . 
For details on how to evaluate classifier security in general,\n see https://arxiv.org/abs/1902.06705\n \"\"\"\n\n params = [\"prob\", \"norm\", \"lamb\", \"solver\", \"max_iter\", \"clip_values\", \"verbose\"]\n\n def __init__(\n self,\n prob: float = 0.3,\n norm: int = 2,\n lamb: float = 0.5,\n solver: str = \"L-BFGS-B\",\n max_iter: int = 10,\n clip_values: Optional[\"CLIP_VALUES_TYPE\"] = None,\n apply_fit: bool = False,\n apply_predict: bool = True,\n verbose: bool = False,\n ):\n \"\"\"\n Create an instance of total variance minimization.\n\n :param prob: Probability of the Bernoulli distribution.\n :param norm: The norm (positive integer).\n :param lamb: The lambda parameter in the objective function.\n :param solver: Current support: `L-BFGS-B`, `CG`, `Newton-CG`.\n :param max_iter: Maximum number of iterations when performing optimization.\n :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed\n for features.\n :param apply_fit: True if applied during fitting/training.\n :param apply_predict: True if applied during predicting.\n :param verbose: Show progress bars.\n \"\"\"\n super().__init__(is_fitted=True, apply_fit=apply_fit, apply_predict=apply_predict)\n self.prob = prob\n self.norm = norm\n self.lamb = lamb\n self.solver = solver\n self.max_iter = max_iter\n self.clip_values = clip_values\n self.verbose = verbose\n self._check_params()\n\n def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:\n \"\"\"\n Apply total variance minimization to sample `x`.\n\n :param x: Sample to compress with shape `(batch_size, width, height, depth)`.\n :param y: Labels of the sample `x`. This function does not affect them in any way.\n :return: Similar samples.\n \"\"\"\n if len(x.shape) == 2:\n raise ValueError(\n \"Feature vectors detected. 
Variance minimization can only be applied to data with spatial dimensions.\"\n )\n x_preproc = x.copy()\n\n # Minimize one input at a time\n for i, x_i in enumerate(tqdm(x_preproc, desc=\"Variance minimization\", disable=not self.verbose)):\n mask = (np.random.rand(*x_i.shape) < self.prob).astype(\"int\")\n x_preproc[i] = self._minimize(x_i, mask)\n\n if self.clip_values is not None:\n np.clip(x_preproc, self.clip_values[0], self.clip_values[1], out=x_preproc)\n\n return x_preproc.astype(ART_NUMPY_DTYPE), y\n\n def _minimize(self, x: np.ndarray, mask: np.ndarray) -> np.ndarray:\n \"\"\"\n Minimize the total variance objective function.\n\n :param x: Original image.\n :param mask: A matrix that decides which points are kept.\n :return: A new image.\n \"\"\"\n z_min = x.copy()\n\n for i in range(x.shape[2]):\n res = minimize(\n self._loss_func,\n z_min[:, :, i].flatten(),\n (x[:, :, i], mask[:, :, i], self.norm, self.lamb),\n method=self.solver,\n jac=self._deri_loss_func,\n options={\"maxiter\": self.max_iter},\n )\n z_min[:, :, i] = np.reshape(res.x, z_min[:, :, i].shape)\n\n return z_min\n\n @staticmethod\n def _loss_func(z_init: np.ndarray, x: np.ndarray, mask: np.ndarray, norm: int, lamb: float) -> float:\n \"\"\"\n Loss function to be minimized.\n\n :param z_init: Initial guess.\n :param x: Original image.\n :param mask: A matrix that decides which points are kept.\n :param norm: The norm (positive integer).\n :param lamb: The lambda parameter in the objective function.\n :return: Loss value.\n \"\"\"\n res = np.sqrt(np.power(z_init - x.flatten(), 2).dot(mask.flatten()))\n z_init = np.reshape(z_init, x.shape)\n res += lamb * np.linalg.norm(z_init[1:, :] - z_init[:-1, :], norm, axis=1).sum()\n res += lamb * np.linalg.norm(z_init[:, 1:] - z_init[:, :-1], norm, axis=0).sum()\n\n return res\n\n @staticmethod\n def _deri_loss_func(z_init: np.ndarray, x: np.ndarray, mask: np.ndarray, norm: int, lamb: float) -> float:\n \"\"\"\n Derivative of loss function to be minimized.\n\n :param z_init: Initial guess.\n :param x: Original image.\n :param mask: A matrix that decides which points are kept.\n :param norm: The norm (positive integer).\n :param lamb: The lambda parameter in the objective function.\n :return: Derivative value.\n \"\"\"\n # First compute the derivative of the first component of the loss function\n nor1 = np.sqrt(np.power(z_init - x.flatten(), 2).dot(mask.flatten()))\n if nor1 < 1e-6:\n nor1 = 1e-6\n der1 = ((z_init - x.flatten()) * mask.flatten()) / (nor1 * 1.0)\n\n # Then compute the derivative of the second component of the loss function\n z_init = np.reshape(z_init, x.shape)\n\n if norm == 1:\n z_d1 = np.sign(z_init[1:, :] - z_init[:-1, :])\n z_d2 = np.sign(z_init[:, 1:] - z_init[:, :-1])\n else:\n z_d1_norm = np.power(np.linalg.norm(z_init[1:, :] - z_init[:-1, :], norm, axis=1), norm - 1)\n z_d2_norm = np.power(np.linalg.norm(z_init[:, 1:] - z_init[:, :-1], norm, axis=0), norm - 1)\n z_d1_norm[z_d1_norm < 1e-6] = 1e-6\n z_d2_norm[z_d2_norm < 1e-6] = 1e-6\n z_d1_norm = np.repeat(z_d1_norm[:, np.newaxis], z_init.shape[1], axis=1)\n z_d2_norm = np.repeat(z_d2_norm[np.newaxis, :], z_init.shape[0], axis=0)\n z_d1 = norm * np.power(z_init[1:, :] - z_init[:-1, :], norm - 1) / z_d1_norm\n z_d2 = norm * np.power(z_init[:, 1:] - z_init[:, :-1], norm - 1) / z_d2_norm\n\n der2 = np.zeros(z_init.shape)\n der2[:-1, :] -= z_d1\n der2[1:, :] += z_d1\n der2[:, :-1] -= z_d2\n der2[:, 1:] += z_d2\n der2 = lamb * der2.flatten()\n\n # Total derivative\n return der1 + der2\n\n def 
_check_params(self) -> None:\n if not isinstance(self.prob, (float, int)) or self.prob < 0.0 or self.prob > 1.0:\n logger.error(\"Probability must be between 0 and 1.\")\n raise ValueError(\"Probability must be between 0 and 1.\")\n\n if not isinstance(self.norm, (int, np.int)) or self.norm <= 0:\n logger.error(\"Norm must be a positive integer.\")\n raise ValueError(\"Norm must be a positive integer.\")\n\n if not (self.solver == \"L-BFGS-B\" or self.solver == \"CG\" or self.solver == \"Newton-CG\"):\n logger.error(\"Current support only L-BFGS-B, CG, Newton-CG.\")\n raise ValueError(\"Current support only L-BFGS-B, CG, Newton-CG.\")\n\n if not isinstance(self.max_iter, (int, np.int)) or self.max_iter <= 0:\n logger.error(\"Number of iterations must be a positive integer.\")\n raise ValueError(\"Number of iterations must be a positive integer.\")\n\n if self.clip_values is not None:\n\n if len(self.clip_values) != 2:\n raise ValueError(\"`clip_values` should be a tuple of 2 floats containing the allowed data range.\")\n\n if np.array(self.clip_values[0] >= self.clip_values[1]).any():\n raise ValueError(\"Invalid `clip_values`: min >= max.\")\n\n if not isinstance(self.verbose, bool):\n raise ValueError(\"The argument `verbose` has to be of type bool.\")\n",
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements clean-label attacks on Neural Networks.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom functools import reduce\nimport logging\nfrom typing import Optional, Tuple, Union, TYPE_CHECKING\n\nimport numpy as np\nfrom tqdm import trange\n\nfrom art.attacks.attack import PoisoningAttackWhiteBox\nfrom art.estimators import BaseEstimator, NeuralNetworkMixin\nfrom art.estimators.classification.classifier import ClassifierMixin\nfrom art.estimators.classification.keras import KerasClassifier\n\nif TYPE_CHECKING:\n from art.utils import CLASSIFIER_NEURALNETWORK_TYPE\n\nlogger = logging.getLogger(__name__)\n\n\nclass FeatureCollisionAttack(PoisoningAttackWhiteBox):\n \"\"\"\n Close implementation of Feature Collision Poisoning Attack by Shafahi, Huang, et al 2018.\n \"Poison Frogs! Targeted Clean-Label Poisoning Attacks on Neural Networks\"\n\n This implementation dynamically calculates the dimension of the feature layer, and doesn't hardcode this\n value to 2048 as done in the paper. 
Thus we recommend using larger values for the similarity_coefficient.\n\n | Paper link: https://arxiv.org/abs/1804.00792\n \"\"\"\n\n attack_params = PoisoningAttackWhiteBox.attack_params + [\n \"target\",\n \"feature_layer\",\n \"learning_rate\",\n \"decay_coeff\",\n \"stopping_tol\",\n \"obj_threshold\",\n \"num_old_obj\",\n \"max_iter\",\n \"similarity_coeff\",\n \"watermark\",\n \"verbose\",\n ]\n\n _estimator_requirements = (BaseEstimator, NeuralNetworkMixin, ClassifierMixin, KerasClassifier)\n\n def __init__(\n self,\n classifier: \"CLASSIFIER_NEURALNETWORK_TYPE\",\n target: np.ndarray,\n feature_layer: Union[str, int],\n learning_rate: float = 500 * 255.0,\n decay_coeff: float = 0.5,\n stopping_tol: float = 1e-10,\n obj_threshold: Optional[float] = None,\n num_old_obj: int = 40,\n max_iter: int = 120,\n similarity_coeff: float = 256.0,\n watermark: Optional[float] = None,\n verbose: bool = True,\n ):\n \"\"\"\n Initialize an Feature Collision Clean-Label poisoning attack\n\n :param classifier: A trained neural network classifier.\n :param target: The target input to misclassify at test time.\n :param feature_layer: The name of the feature representation layer.\n :param learning_rate: The learning rate of clean-label attack optimization.\n :param decay_coeff: The decay coefficient of the learning rate.\n :param stopping_tol: Stop iterations after changes in attacks in less than this threshold.\n :param obj_threshold: Stop iterations after changes in objectives values are less than this threshold.\n :param num_old_obj: The number of old objective values to store.\n :param max_iter: The maximum number of iterations for the attack.\n :param similarity_coeff: The maximum number of iterations for the attack.\n :param watermark: Whether The opacity of the watermarked target image.\n :param verbose: Show progress bars.\n \"\"\"\n super().__init__(classifier=classifier) # type: ignore\n self.target = target\n self.feature_layer = feature_layer\n self.learning_rate = learning_rate\n self.decay_coeff = decay_coeff\n self.stopping_tol = stopping_tol\n self.obj_threshold = obj_threshold\n self.num_old_obj = num_old_obj\n self.max_iter = max_iter\n self.similarity_coeff = similarity_coeff\n self.watermark = watermark\n self.verbose = verbose\n self._check_params()\n\n self.target_placeholder, self.target_feature_rep = self.estimator.get_activations(\n self.target, self.feature_layer, 1, framework=True\n )\n self.poison_placeholder, self.poison_feature_rep = self.estimator.get_activations(\n self.target, self.feature_layer, 1, framework=True\n )\n self.attack_loss = tensor_norm(self.poison_feature_rep - self.target_feature_rep)\n\n def poison(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Iteratively finds optimal attack points starting at values at x\n\n :param x: The base images to begin the poison process.\n :param y: Not used in this attack (clean-label).\n :return: An tuple holding the (poisoning examples, poisoning labels).\n \"\"\"\n num_poison = len(x)\n final_attacks = []\n if num_poison == 0:\n raise ValueError(\"Must input at least one poison point\")\n\n target_features = self.estimator.get_activations(self.target, self.feature_layer, 1)\n for init_attack in x:\n old_attack = np.expand_dims(np.copy(init_attack), axis=0)\n poison_features = self.estimator.get_activations(old_attack, self.feature_layer, 1)\n old_objective = self.objective(poison_features, target_features, init_attack, old_attack)\n last_m_objectives = 
[old_objective]\n\n for i in trange(self.max_iter, desc=\"Feature collision\", disable=not self.verbose):\n # forward step\n new_attack = self.forward_step(old_attack)\n\n # backward step\n new_attack = self.backward_step(np.expand_dims(init_attack, axis=0), poison_features, new_attack)\n\n rel_change_val = np.linalg.norm(new_attack - old_attack) / np.linalg.norm(new_attack)\n if rel_change_val < self.stopping_tol or self.obj_threshold and old_objective <= self.obj_threshold:\n logger.info(\"stopped after %d iterations due to small changes\", i)\n break\n\n np.expand_dims(new_attack, axis=0)\n new_feature_rep = self.estimator.get_activations(new_attack, self.feature_layer, 1)\n new_objective = self.objective(new_feature_rep, target_features, init_attack, new_attack)\n\n avg_of_last_m = sum(last_m_objectives) / float(min(self.num_old_obj, i + 1))\n\n # Increasing objective means then learning rate is too big. Chop it, and throw out the latest iteration\n if new_objective >= avg_of_last_m and (i % self.num_old_obj / 2 == 0):\n self.learning_rate *= self.decay_coeff\n else:\n old_attack = new_attack\n old_objective = new_objective\n\n if i < self.num_old_obj - 1:\n last_m_objectives.append(new_objective)\n else:\n # first remove the oldest obj then append the new obj\n del last_m_objectives[0]\n last_m_objectives.append(new_objective)\n\n # Watermarking\n watermark = self.watermark * self.target if self.watermark else 0\n final_poison = np.clip(old_attack + watermark, *self.estimator.clip_values)\n final_attacks.append(final_poison)\n\n return np.vstack(final_attacks), self.estimator.predict(x)\n\n def forward_step(self, poison: np.ndarray) -> np.ndarray:\n \"\"\"\n Forward part of forward-backward splitting algorithm.\n\n :param poison: the current poison samples.\n :return: poison example closer in feature representation to target space.\n \"\"\"\n (attack_grad,) = self.estimator.custom_loss_gradient(\n self.attack_loss,\n [self.poison_placeholder, self.target_placeholder],\n [poison, self.target],\n name=\"feature_collision_\" + str(self.feature_layer),\n )\n poison -= self.learning_rate * attack_grad[0]\n\n return poison\n\n def backward_step(self, base: np.ndarray, feature_rep: np.ndarray, poison: np.ndarray) -> np.ndarray:\n \"\"\"\n Backward part of forward-backward splitting algorithm\n\n :param base: The base image that the poison was initialized with.\n :param feature_rep: Numpy activations at the target layer.\n :param poison: The current poison samples.\n :return: Poison example closer in feature representation to target space.\n \"\"\"\n num_features = reduce(lambda x, y: x * y, base.shape)\n dim_features = feature_rep.shape[-1]\n beta = self.similarity_coeff * (dim_features / num_features) ** 2\n poison = (poison + self.learning_rate * beta * base) / (1 + beta * self.learning_rate)\n low, high = self.estimator.clip_values\n return np.clip(poison, low, high)\n\n def objective(\n self, poison_feature_rep: np.ndarray, target_feature_rep: np.ndarray, base_image: np.ndarray, poison: np.ndarray\n ) -> float:\n \"\"\"\n Objective function of the attack\n\n :param poison_feature_rep: The numpy activations of the poison image.\n :param target_feature_rep: The numpy activations of the target image.\n :param base_image: The initial image used to poison.\n :param poison: The current poison image.\n :return: The objective of the optimization.\n \"\"\"\n num_features = base_image.size\n num_activations = poison_feature_rep.size\n beta = self.similarity_coeff * (num_activations / num_features) 
** 2\n return np.linalg.norm(poison_feature_rep - target_feature_rep) + beta * np.linalg.norm(poison - base_image)\n\n def _check_params(self) -> None:\n if self.learning_rate <= 0:\n raise ValueError(\"Learning rate must be strictly positive\")\n\n if self.max_iter < 1:\n raise ValueError(\"Value of max_iter at least 1\")\n\n if not isinstance(self.feature_layer, (str, int)):\n raise TypeError(\"Feature layer should be a string or int\")\n\n if self.decay_coeff <= 0:\n raise ValueError(\"Decay coefficient must be positive\")\n\n if self.stopping_tol <= 0:\n raise ValueError(\"Stopping tolerance must be positive\")\n\n if self.obj_threshold and self.obj_threshold <= 0:\n raise ValueError(\"Objective threshold must be positive\")\n\n if self.num_old_obj <= 0:\n raise ValueError(\"Number of old stored objectives must be positive\")\n\n if self.max_iter <= 0:\n raise ValueError(\"Number of old stored objectives must be positive\")\n\n if self.watermark and not (isinstance(self.watermark, float) and 0 <= self.watermark < 1):\n raise ValueError(\"Watermark must be between 0 and 1\")\n\n if not isinstance(self.verbose, bool):\n raise ValueError(\"The argument `verbose` has to be of type bool.\")\n\n\ndef get_class_name(obj: object) -> str:\n \"\"\"\n Get the full class name of an object.\n\n :param obj: A Python object.\n :return: A qualified class name.\n \"\"\"\n module = obj.__class__.__module__\n\n if module is None or module == str.__class__.__module__:\n return obj.__class__.__name__\n\n return module + \".\" + obj.__class__.__name__\n\n\ndef tensor_norm(tensor, norm_type: Union[int, float, str] = 2):\n \"\"\"\n Compute the norm of a tensor.\n\n :param tensor: A tensor from a supported ART neural network.\n :param norm_type: Order of the norm.\n :return: A tensor with the norm applied.\n \"\"\"\n tf_tensor_types = (\"tensorflow.python.framework.ops.Tensor\", \"tensorflow.python.framework.ops.EagerTensor\")\n torch_tensor_types = ()\n mxnet_tensor_types = ()\n supported_types = tf_tensor_types + torch_tensor_types + mxnet_tensor_types\n tensor_type = get_class_name(tensor)\n if tensor_type not in supported_types:\n raise TypeError(\"Tensor type `\" + tensor_type + \"` is not supported\")\n elif tensor_type in tf_tensor_types:\n import tensorflow as tf\n\n return tf.norm(tensor, ord=norm_type)\n elif tensor_type in torch_tensor_types:\n import torch\n\n return torch.norm(tensor, p=norm_type)\n elif tensor_type in mxnet_tensor_types:\n import mxnet\n\n return mxnet.ndarray.norm(tensor, ord=norm_type)\n",
"# pred is the list of predicted labels, sensitive is the list of labels for sensitive attribute\n# unprotected_vals is the label for unprotected group. e.g. 1 means unprivilege, 0 means privilege\n# positive_pred is the favorable result in prediction task. e.g. get approved for a loan\ndef DIbinary(pred,sensitive,unprotected_vals,positive_pred):\n unprotected_positive = 0.0\n unprotected_negative = 0.0\n protected_positive = 0.0\n protected_negative = 0.0\n for i in range(0, len(pred)):\n protected_val = sensitive[i]\n predicted_val = pred[i]\n # when someone is in unprotected group\n if protected_val in unprotected_vals:\n if int(predicted_val) == int(positive_pred):\n unprotected_positive += 1\n else:\n unprotected_negative += 1\n # the person is in protected group ie male = 0\n else:\n if int(predicted_val) == int(positive_pred):\n protected_positive += 1\n else:\n protected_negative += 1\n protected_pos_percent = 0.0\n if protected_positive + protected_negative > 0:\n protected_pos_percent = protected_positive / (protected_positive + protected_negative)\n unprotected_pos_percent = 0.0\n if unprotected_positive + unprotected_negative > 0:\n unprotected_pos_percent = unprotected_positive / \\\n (unprotected_positive + unprotected_negative)\n return unprotected_pos_percent, protected_pos_percent\n\n\n\ndef CV(pred,sensitive,unprotected_vals,positive_pred):\n protected_pos_percent, unprotected_pos_percent = DIbinary(pred, sensitive, unprotected_vals,positive_pred)\n cv = unprotected_pos_percent -protected_pos_percent\n return 1 - cv\n\n# acutal is the list of actual labels, pred is the list of predicted labels.\n# sensitive is the column of sensitive attribute, target_group is s in S = s\n# positive_pred is the favorable result in prediction task. e.g. get approved for a loan\ndef calibration_pos(actual,pred,sensitive,target_group,positive_pred):\n tot_pred_pos = 0\n act_pos = 0\n for act, pred_val, sens in zip(actual, pred,sensitive):\n # the case S != s\n if sens != target_group:\n continue\n else:\n # Yhat = 1\n if pred_val == positive_pred:\n tot_pred_pos += 1\n # the case both Yhat = 1 and Y = 1\n if act == positive_pred:\n act_pos +=1\n if act_pos == 0 and tot_pred_pos ==0:\n return 1\n if tot_pred_pos == 0:\n return 0\n return act_pos/tot_pred_pos\n\nfrom sklearn.metrics import confusion_matrix\n\n# acutal is the list of actual labels, pred is the list of predicted labels.\n# positive_pred is the favorable result in prediction task. e.g. get approved for a loan\ndef TNR(actual,pred,positive_pred):\n classes = list(set(actual))\n matrix = confusion_matrix(actual, pred, labels=classes)\n TN = 0.0\n allN = 0.0\n for i in range(0, len(classes)):\n trueval = classes[i]\n if trueval == positive_pred:\n continue\n for j in range(0, len(classes)):\n allN += matrix[i][j]\n predval = classes[j]\n if trueval == predval:\n TN += matrix[i][j]\n if allN == 0.0:\n return 1.0\n return TN / allN\n\n\nfrom sklearn.metrics import recall_score\n# acutal is the list of actual labels, pred is the list of predicted labels.\n# positive_pred is the favorable result in prediction task. e.g. get approved for a loan\ndef TPR(actual,pred,positive_pred):\n return recall_score(actual,pred,pos_label = positive_pred,average = 'binary')\n\n\n# acutal is the list of actual labels, pred is the list of predicted labels.\n# positive_pred is the favorable result in prediction task. e.g. 
get approved for a loan\ndef get_BCR(actual,pred,positive_pred):\n tpr_val = TPR(actual,pred,positive_pred)\n tnr_val = TNR(actual,pred,positive_pred)\n bcr = (tpr_val + tnr_val) / 2.0\n return bcr\n",
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport numpy as np\nimport pytest\n\nfrom art.attacks.evasion import FastGradientMethod, FrameSaliencyAttack\nfrom art.estimators.estimator import BaseEstimator, LossGradientsMixin\n\nfrom tests.utils import ExpectedValue\nfrom tests.attacks.utils import backend_check_adverse_values, backend_check_adverse_frames\nfrom tests.attacks.utils import backend_test_classifier_type_check_fail\nfrom tests.utils import ARTTestException\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]()\ndef fix_get_mnist_subset(get_mnist_dataset):\n (x_train_mnist, y_train_mnist), (x_test_mnist, y_test_mnist) = get_mnist_dataset\n n_train = 100\n n_test = 11\n yield x_train_mnist[:n_train], y_train_mnist[:n_train], x_test_mnist[:n_test], y_test_mnist[:n_test]\n\n\[email protected](\"pytorch\")\[email protected]_agnostic\ndef test_one_shot(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack):\n try:\n classifier = image_dl_estimator_for_attack(FastGradientMethod)\n\n # for the one-shot method, frame saliency attack should resort to plain FastGradientMethod\n expected_values = {\n \"x_test_mean\": ExpectedValue(0.2346725, 0.002),\n \"x_test_min\": ExpectedValue(-1.0, 0.00001),\n \"x_test_max\": ExpectedValue(1.0, 0.00001),\n \"y_test_pred_adv_expected\": ExpectedValue(np.asarray([4, 4, 4, 7, 7, 4, 7, 2, 2, 3, 0]), 2),\n }\n\n attacker = FastGradientMethod(classifier, eps=1.0, batch_size=128)\n attack = FrameSaliencyAttack(classifier, attacker, \"one_shot\")\n backend_check_adverse_values(attack, fix_get_mnist_subset, expected_values)\n except ARTTestException as e:\n art_warning(e)\n\n\[email protected](\"pytorch\")\[email protected]_agnostic\ndef test_iterative_saliency(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack):\n try:\n classifier = image_dl_estimator_for_attack(FastGradientMethod)\n\n expected_values_axis_1 = {\n \"nb_perturbed_frames\": ExpectedValue(np.asarray([10, 1, 2, 12, 16, 1, 2, 7, 4, 11, 5]), 2)\n }\n\n expected_values_axis_2 = {\n \"nb_perturbed_frames\": ExpectedValue(np.asarray([11, 1, 2, 6, 14, 2, 2, 13, 4, 8, 4]), 2)\n }\n\n attacker = FastGradientMethod(classifier, eps=0.3, batch_size=128)\n attack = FrameSaliencyAttack(classifier, attacker, \"iterative_saliency\")\n backend_check_adverse_frames(attack, 
fix_get_mnist_subset, expected_values_axis_1)\n\n # test with non-default frame index:\n attack = FrameSaliencyAttack(classifier, attacker, \"iterative_saliency\", frame_index=2)\n backend_check_adverse_frames(attack, fix_get_mnist_subset, expected_values_axis_2)\n except ARTTestException as e:\n art_warning(e)\n\n\[email protected](\"pytorch\")\[email protected]_agnostic\ndef test_iterative_saliency_refresh(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack):\n try:\n classifier = image_dl_estimator_for_attack(FastGradientMethod)\n\n expected_values_axis_1 = {\n \"nb_perturbed_frames\": ExpectedValue(np.asarray([5, 1, 3, 10, 8, 1, 3, 8, 4, 7, 7]), 2)\n }\n\n expected_values_axis_2 = {\n \"nb_perturbed_frames\": ExpectedValue(np.asarray([11, 1, 2, 6, 14, 2, 2, 13, 4, 8, 4]), 2)\n }\n\n attacker = FastGradientMethod(classifier, eps=0.3, batch_size=128)\n attack = FrameSaliencyAttack(classifier, attacker, \"iterative_saliency_refresh\")\n backend_check_adverse_frames(attack, fix_get_mnist_subset, expected_values_axis_1)\n\n # test with non-default frame index:\n attack = FrameSaliencyAttack(classifier, attacker, \"iterative_saliency\", frame_index=2)\n backend_check_adverse_frames(attack, fix_get_mnist_subset, expected_values_axis_2)\n except ARTTestException as e:\n art_warning(e)\n\n\[email protected]_agnostic\ndef test_classifier_type_check_fail(art_warning):\n try:\n backend_test_classifier_type_check_fail(FastGradientMethod, [LossGradientsMixin, BaseEstimator])\n except ARTTestException as e:\n art_warning(e)\n",
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements STRIP: A Defence Against Trojan Attacks on Deep Neural Networks.\n\n| Paper link: https://arxiv.org/abs/1902.06531\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom typing import Callable, Optional\n\nimport numpy as np\nfrom scipy.stats import entropy, norm\nfrom tqdm import tqdm\n\nfrom art.estimators.certification.abstain import AbstainPredictorMixin\n\nlogger = logging.getLogger(__name__)\n\n\nclass STRIPMixin(AbstainPredictorMixin):\n \"\"\"\n Implementation of STRIP: A Defence Against Trojan Attacks on Deep Neural Networks (Gao et. al. 2020)\n\n | Paper link: https://arxiv.org/abs/1902.06531\n \"\"\"\n\n def __init__(\n self,\n predict_fn: Callable[[np.ndarray], np.ndarray],\n num_samples: int = 20,\n false_acceptance_rate: float = 0.01,\n **kwargs\n ) -> None:\n \"\"\"\n Create a STRIP defense\n\n :param predict_fn: The predict function of the original classifier\n :param num_samples: The number of samples to use to test entropy at inference time\n :param false_acceptance_rate: The percentage of acceptable false acceptance\n \"\"\"\n super().__init__(**kwargs)\n self.predict_fn = predict_fn\n self.num_samples = num_samples\n self.false_acceptance_rate = false_acceptance_rate\n self.entropy_threshold: Optional[float] = None\n self.validation_data: Optional[np.ndarray] = None\n\n def predict(self, x: np.ndarray) -> np.ndarray:\n \"\"\"\n Perform prediction of the given classifier for a batch of inputs, potentially filtering suspicious input\n\n :param x: Test set.\n :return: Array of predictions of shape `(nb_inputs, nb_classes)`.\n \"\"\"\n raw_predictions = self.predict_fn(x)\n\n if self.entropy_threshold is None or self.validation_data is None:\n logger.warning(\"Mitigation has not been performed. 
Predictions may be unsafe.\")\n return raw_predictions\n\n x_val = self.validation_data\n final_predictions = []\n\n for i, img in enumerate(x):\n # Randomly select samples from test set\n selected_indices = np.random.choice(np.arange(len(x_val)), self.num_samples)\n\n # Perturb the images by combining them\n perturbed_images = np.array([combine_images(img, x_val[idx]) for idx in selected_indices])\n\n # Predict on the perturbed images\n perturbed_predictions = self.predict_fn(perturbed_images)\n\n # Calculate normalized entropy\n normalized_entropy = np.sum(entropy(perturbed_predictions, base=2, axis=0)) / float(self.num_samples)\n\n # Abstain if entropy is below threshold\n if normalized_entropy <= self.entropy_threshold:\n final_predictions.append(self.abstain())\n else:\n final_predictions.append(raw_predictions[i])\n\n return np.array(final_predictions)\n\n def mitigate(self, x_val: np.ndarray) -> None:\n \"\"\"\n Mitigates the effect of poison on a classifier\n\n :param x_val: Validation data to use to mitigate the effect of poison.\n \"\"\"\n self.validation_data = x_val\n entropies = []\n\n # Find normal entropy distribution\n for i, img in enumerate(tqdm(x_val)):\n selected_indices = np.random.choice(np.arange(len(x_val)), self.num_samples)\n perturbed_images = np.array([combine_images(img, x_val[idx]) for idx in selected_indices])\n perturbed_predictions = self.predict_fn(perturbed_images)\n normalized_entropy = np.sum(entropy(perturbed_predictions, base=2, axis=0)) / float(self.num_samples)\n entropies.append(normalized_entropy)\n\n mean_entropy, std_entropy = norm.fit(entropies)\n\n # Set threshold to FAR percentile\n self.entropy_threshold = norm.ppf(self.false_acceptance_rate, loc=mean_entropy, scale=std_entropy)\n if self.entropy_threshold < 0:\n logger.warning(\"Entropy value is negative. Increase FAR for reasonable performance.\")\n\n\ndef combine_images(img1: np.ndarray, img2: np.ndarray, alpha=0.5) -> np.ndarray:\n \"\"\"\n Combine two Numpy arrays of the same shape\n\n :param img1: a Numpy array\n :param img2: a Numpy array\n :param alpha: percentage weight for the first image\n :return: The combined image\n \"\"\"\n return alpha * img1 + (1 - alpha) * img2\n",
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements the Label-Only Inference Attack based on Decision Boundary.\n\n| Paper link: https://arxiv.org/abs/2007.14321\n\"\"\"\nimport logging\nfrom typing import Optional, NoReturn, TYPE_CHECKING\n\nimport numpy as np\n\nfrom art.attacks.attack import InferenceAttack\nfrom art.estimators.estimator import BaseEstimator\nfrom art.estimators.classification.classifier import ClassifierMixin\n\nif TYPE_CHECKING:\n from art.utils import CLASSIFIER_TYPE\n\nlogger = logging.getLogger(__name__)\n\n\nclass LabelOnlyDecisionBoundary(InferenceAttack):\n \"\"\"\n Implementation of Label-Only Inference Attack based on Decision Boundary.\n\n | Paper link: https://arxiv.org/abs/2007.14321\n \"\"\"\n\n attack_params = InferenceAttack.attack_params + [\n \"distance_threshold_tau\",\n ]\n _estimator_requirements = (BaseEstimator, ClassifierMixin)\n\n def __init__(self, estimator: \"CLASSIFIER_TYPE\", distance_threshold_tau: Optional[float] = None):\n \"\"\"\n Create a `LabelOnlyDecisionBoundary` instance for Label-Only Inference Attack based on Decision Boundary.\n\n :param estimator: A trained classification estimator.\n :param distance_threshold_tau: Threshold distance for decision boundary. 
Samples with boundary distances larger\n than threshold are considered members of the training dataset.\n \"\"\"\n super().__init__(estimator=estimator)\n self.distance_threshold_tau = distance_threshold_tau\n self._check_params()\n\n def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:\n \"\"\"\n Infer membership of input `x` in estimator's training data.\n\n :param x: Input data.\n :param y: True labels for `x`.\n :param kwargs: Parameters for HopSkipJump attack except argument `estimator`.\n :return: An array holding the inferred membership status, 1 indicates a member and 0 indicates non-member.\n \"\"\"\n from art.attacks.evasion.hop_skip_jump import HopSkipJump\n\n hsj = HopSkipJump(classifier=self.estimator, **kwargs)\n x_adv = hsj.generate(x=x, y=y)\n\n distance = np.linalg.norm((x_adv - x).reshape((x.shape[0], -1)), ord=2, axis=1)\n\n y_pred = self.estimator.predict(x=x)\n\n distance[np.argmax(y_pred, axis=1) != np.argmax(y, axis=1)] = 0\n\n is_member = np.where(distance > self.distance_threshold_tau, 1, 0)\n\n return is_member\n\n def calibrate_distance_threshold(\n self,\n classifier_train: \"CLASSIFIER_TYPE\",\n x_train: np.ndarray,\n y_train: np.ndarray,\n x_test: np.ndarray,\n y_test: np.ndarray,\n **kwargs\n ) -> NoReturn:\n \"\"\"\n Calibrate distance threshold maximising the membership inference accuracy on `x_train` and `x_test`.\n\n :param classifier_train: A trained classifier\n :param x_train: Training data.\n :param y_train: Labels of training data `x_train`.\n :param x_test: Test data.\n :param y_test: Labels of test data `x_test`.\n \"\"\"\n from art.attacks.evasion.hop_skip_jump import HopSkipJump\n\n hsj = HopSkipJump(classifier=classifier_train, **kwargs)\n\n x_train_adv = hsj.generate(x=x_train, y=y_train)\n x_test_adv = hsj.generate(x=x_test, y=y_test)\n\n distance_train = np.linalg.norm((x_train_adv - x_train).reshape((x_train.shape[0], -1)), ord=2, axis=1)\n distance_test = np.linalg.norm((x_test_adv - x_test).reshape((x_test.shape[0], -1)), ord=2, axis=1)\n\n y_train_pred = self.estimator.predict(x=x_train)\n y_test_pred = self.estimator.predict(x=x_test)\n\n distance_train[np.argmax(y_train_pred, axis=1) != np.argmax(y_train, axis=1)] = 0\n distance_test[np.argmax(y_test_pred, axis=1) != np.argmax(y_test, axis=1)] = 0\n\n num_increments = 100\n tau_increment = np.amax([np.amax(distance_train), np.amax(distance_test)]) / num_increments\n\n acc_max = 0.0\n distance_threshold_tau = 0.0\n\n for i_tau in range(1, num_increments):\n\n is_member_train = np.where(distance_train > i_tau * tau_increment, 1, 0)\n is_member_test = np.where(distance_test > i_tau * tau_increment, 1, 0)\n\n acc = (np.sum(is_member_train) + (is_member_test.shape[0] - np.sum(is_member_test))) / (\n is_member_train.shape[0] + is_member_test.shape[0]\n )\n\n if acc > acc_max:\n distance_threshold_tau = i_tau * tau_increment\n acc_max = acc\n\n self.distance_threshold_tau = distance_threshold_tau\n\n def _check_params(self) -> None:\n if not isinstance(self.distance_threshold_tau, (int, float)) or self.distance_threshold_tau <= 0.0:\n raise ValueError(\"The distance threshold `distance_threshold_tau` needs to be a positive float.\")\n",
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements the frame saliency attack framework. Originally designed for video data, this framework will\nprioritize which parts of a sequential input should be perturbed based on saliency scores.\n\n| Paper link: https://arxiv.org/abs/1811.11875\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom typing import Optional, TYPE_CHECKING\n\nimport numpy as np\nfrom tqdm import trange\n\nfrom art.config import ART_NUMPY_DTYPE\nfrom art.estimators.estimator import BaseEstimator, NeuralNetworkMixin\nfrom art.estimators.classification.classifier import ClassGradientsMixin\nfrom art.attacks.attack import EvasionAttack\nfrom art.utils import (\n compute_success_array,\n get_labels_np_array,\n check_and_transform_label_format,\n)\n\nif TYPE_CHECKING:\n from art.utils import CLASSIFIER_NEURALNETWORK_TYPE\n\nlogger = logging.getLogger(__name__)\n\n\nclass FrameSaliencyAttack(EvasionAttack):\n \"\"\"\n Implementation of the attack framework proposed by Inkawhich et al. (2018). Prioritizes the frame of a sequential\n input to be adversarially perturbed based on the saliency score of each frame.\n\n | Paper link: https://arxiv.org/abs/1811.11875\n \"\"\"\n\n method_list = [\"iterative_saliency\", \"iterative_saliency_refresh\", \"one_shot\"]\n attack_params = EvasionAttack.attack_params + [\n \"attacker\",\n \"method\",\n \"frame_index\",\n \"batch_size\",\n \"verbose\",\n ]\n _estimator_requirements = (BaseEstimator, NeuralNetworkMixin, ClassGradientsMixin)\n\n def __init__(\n self,\n classifier: \"CLASSIFIER_NEURALNETWORK_TYPE\",\n attacker: EvasionAttack,\n method: str = \"iterative_saliency\",\n frame_index: int = 1,\n batch_size: int = 1,\n verbose: bool = True,\n ):\n \"\"\"\n :param classifier: A trained classifier.\n :param attacker: An adversarial evasion attacker which supports masking. Currently supported:\n ProjectedGradientDescent, BasicIterativeMethod, FastGradientMethod.\n :param method: Specifies which method to use: \"iterative_saliency\" (adds perturbation iteratively to frame\n with highest saliency score until attack is successful), \"iterative_saliency_refresh\" (updates\n perturbation after each iteration), \"one_shot\" (adds all perturbations at once, i.e. 
defaults to\n original attack).\n :param frame_index: Index of the axis in input (feature) array `x` representing the frame dimension.\n :param batch_size: Size of the batch on which adversarial samples are generated.\n :param verbose: Show progress bars.\n \"\"\"\n super().__init__(estimator=classifier)\n\n self.attacker = attacker\n self.method = method\n self.frame_index = frame_index\n self.batch_size = batch_size\n self.verbose = verbose\n self._check_params()\n\n def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:\n \"\"\"\n Generate adversarial samples and return them in an array.\n\n :param x: An array with the original inputs.\n :param y: An array with the original labels to be predicted.\n :return: An array holding the adversarial examples.\n \"\"\"\n if len(x.shape) < 3:\n raise ValueError(\"Frame saliency attack works only on inputs of dimension greater than 2.\")\n\n if self.frame_index >= len(x.shape):\n raise ValueError(\"Frame index is out of bounds for the given input shape.\")\n\n y = check_and_transform_label_format(y, nb_classes=self.estimator.nb_classes)\n\n if self.method == \"one_shot\":\n if y is None:\n return self.attacker.generate(x)\n else:\n return self.attacker.generate(x, y)\n\n if y is None:\n # Throw error if attack is targeted, but no targets are provided\n if hasattr(self.attacker, \"targeted\") and self.attacker.targeted: # type: ignore\n raise ValueError(\"Target labels `y` need to be provided for a targeted attack.\")\n\n # Use model predictions as correct outputs\n targets = get_labels_np_array(self.estimator.predict(x, batch_size=self.batch_size))\n else:\n targets = y\n\n nb_samples = x.shape[0]\n nb_frames = x.shape[self.frame_index]\n x_adv = x.astype(ART_NUMPY_DTYPE)\n\n # Determine for which adversarial examples the attack fails:\n attack_failure = self._compute_attack_failure_array(x, targets, x_adv)\n\n # Determine the order in which to perturb frames, based on saliency scores:\n frames_to_perturb = self._compute_frames_to_perturb(x_adv, targets)\n\n # Generate adversarial perturbations. 
If the method is \"iterative_saliency_refresh\", we will use a mask so that\n # only the next frame to be perturbed is considered in the attack; moreover we keep track of the next frames to\n # be perturbed so they will not be perturbed again later on.\n mask = np.ones(x.shape)\n if self.method == \"iterative_saliency_refresh\":\n mask = np.zeros(x.shape)\n mask = np.swapaxes(mask, 1, self.frame_index)\n mask[:, frames_to_perturb[:, 0], ::] = 1\n mask = np.swapaxes(mask, 1, self.frame_index)\n disregard = np.zeros((nb_samples, nb_frames))\n disregard[:, frames_to_perturb[:, 0]] = np.inf\n\n x_adv_new = self.attacker.generate(x, targets, mask=mask)\n\n # Here starts the main iteration:\n for i in trange(nb_frames, desc=\"Frame saliency\", disable=not self.verbose):\n # Check if attack has already succeeded for all inputs:\n if sum(attack_failure) == 0:\n break\n\n # Update designated frames with adversarial perturbations:\n x_adv = np.swapaxes(x_adv, 1, self.frame_index)\n x_adv_new = np.swapaxes(x_adv_new, 1, self.frame_index)\n x_adv[attack_failure, frames_to_perturb[:, i][attack_failure], ::] = x_adv_new[\n attack_failure, frames_to_perturb[:, i][attack_failure], ::\n ]\n x_adv = np.swapaxes(x_adv, 1, self.frame_index)\n x_adv_new = np.swapaxes(x_adv_new, 1, self.frame_index)\n\n # Update for which adversarial examples the attack still fails:\n attack_failure = self._compute_attack_failure_array(x, targets, x_adv)\n\n # For the \"refresh\" method, update the next frames to be perturbed (disregarding the frames that were\n # perturbed already) and also refresh the adversarial perturbations:\n if self.method == \"iterative_saliency_refresh\" and i < nb_frames - 1:\n frames_to_perturb = self._compute_frames_to_perturb(x_adv, targets, disregard)\n mask = np.zeros(x.shape)\n mask = np.swapaxes(mask, 1, self.frame_index)\n mask[:, frames_to_perturb[:, i + 1], ::] = 1\n mask = np.swapaxes(mask, 1, self.frame_index)\n disregard[:, frames_to_perturb[:, i + 1]] = np.inf\n x_adv_new = self.attacker.generate(x_adv, targets, mask=mask)\n\n return x_adv\n\n def _compute_attack_failure_array(self, x: np.ndarray, targets: np.ndarray, x_adv: np.ndarray) -> np.ndarray:\n attack_success = compute_success_array(\n self.attacker.estimator, x, targets, x_adv, self.attacker.targeted # type: ignore\n )\n return np.invert(attack_success)\n\n def _compute_frames_to_perturb(\n self, x_adv: np.ndarray, targets: np.ndarray, disregard: Optional[np.ndarray] = None\n ) -> np.ndarray:\n saliency_score = self.estimator.loss_gradient(x_adv, targets)\n saliency_score = np.swapaxes(saliency_score, 1, self.frame_index)\n saliency_score = saliency_score.reshape((saliency_score.shape[:2] + (np.prod(saliency_score.shape[2:]),)))\n saliency_score = np.mean(np.abs(saliency_score), axis=2)\n\n if disregard is not None:\n saliency_score += disregard\n\n return np.argsort(-saliency_score, axis=1)\n\n def _check_params(self) -> None:\n from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent import ProjectedGradientDescent\n from art.attacks.evasion.iterative_method import BasicIterativeMethod\n from art.attacks.evasion.fast_gradient import FastGradientMethod\n\n if not isinstance(self.attacker, (ProjectedGradientDescent, BasicIterativeMethod, FastGradientMethod)):\n raise ValueError(\n \"The attacker must be either of class 'ProjectedGradientDescent', 'BasicIterativeMethod' or \"\n \"'FastGradientMethod'\"\n )\n\n if self.method not in self.method_list:\n raise ValueError(\"Method must be either 
'iterative_saliency', 'iterative_saliency_refresh' or 'one_shot'.\")\n\n if self.frame_index < 1:\n raise ValueError(\"The index `frame_index` of the frame dimension has to be >=1.\")\n\n if self.batch_size <= 0:\n raise ValueError(\"The batch size `batch_size` has to be positive.\")\n\n if not self.estimator == self.attacker.estimator:\n raise Warning(\"Different classifiers given for computation of saliency scores and adversarial noise.\")\n\n if not isinstance(self.verbose, bool):\n raise ValueError(\"The argument `verbose` has to be of type bool.\")\n",
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements a wrapper class for GPy Gaussian Process classification models.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport os\nfrom typing import List, Optional, Union, Tuple, TYPE_CHECKING\n\nimport numpy as np\n\nfrom art.estimators.classification.classifier import ClassifierClassLossGradients\nfrom art import config\n\nif TYPE_CHECKING:\n # pylint: disable=C0412\n from GPy.models import GPClassification\n\n from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE\n from art.defences.preprocessor import Preprocessor\n from art.defences.postprocessor import Postprocessor\n\nlogger = logging.getLogger(__name__)\n\n\n# pylint: disable=C0103\nclass GPyGaussianProcessClassifier(ClassifierClassLossGradients):\n \"\"\"\n Wrapper class for GPy Gaussian Process classification models.\n \"\"\"\n\n def __init__(\n self,\n model: Optional[\"GPClassification\"] = None,\n clip_values: Optional[\"CLIP_VALUES_TYPE\"] = None,\n preprocessing_defences: Union[\"Preprocessor\", List[\"Preprocessor\"], None] = None,\n postprocessing_defences: Union[\"Postprocessor\", List[\"Postprocessor\"], None] = None,\n preprocessing: \"PREPROCESSING_TYPE\" = (0, 1),\n ) -> None:\n \"\"\"\n Create a `Classifier` instance GPY Gaussian Process classification models.\n\n :param model: GPY Gaussian Process Classification model.\n :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed\n for features.\n :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.\n :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.\n :param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be\n used for data preprocessing. The first value will be subtracted from the input. 
The input will then\n be divided by the second one.\n \"\"\"\n from GPy.models import GPClassification\n\n if not isinstance(model, GPClassification):\n raise TypeError(\"Model must be of type GPy.models.GPClassification\")\n\n super().__init__(\n model=model,\n clip_values=clip_values,\n preprocessing_defences=preprocessing_defences,\n postprocessing_defences=postprocessing_defences,\n preprocessing=preprocessing,\n )\n self._nb_classes = 2 # always binary\n\n @property\n def input_shape(self) -> Tuple[int, ...]:\n \"\"\"\n Return the shape of one input sample.\n\n :return: Shape of one input sample.\n \"\"\"\n return self._input_shape # type: ignore\n\n # pylint: disable=W0221\n def class_gradient( # type: ignore\n self, x: np.ndarray, label: Union[int, List[int], None] = None, eps: float = 0.0001,\n ) -> np.ndarray:\n \"\"\"\n Compute per-class derivatives w.r.t. `x`.\n\n :param x: Sample input with shape as expected by the model.\n :param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class\n output is computed for all samples. If multiple values as provided, the first dimension should\n match the batch size of `x`, and each value will be used as target for its corresponding sample in\n `x`. If `None`, then gradients for all classes will be computed for each sample.\n :param eps: Fraction added to the diagonal elements of the input `x`.\n :return: Array of gradients of input features w.r.t. each class in the form\n `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes\n `(batch_size, 1, input_shape)` when `label` parameter is specified.\n \"\"\"\n # Apply preprocessing\n x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)\n\n grads = np.zeros((np.shape(x_preprocessed)[0], 2, np.shape(x)[1]))\n for i in range(np.shape(x_preprocessed)[0]):\n # Get gradient for the two classes GPC can maximally have\n for i_c in range(2):\n ind = self.predict(x[i].reshape(1, -1))[0, i_c]\n sur = self.predict(\n np.repeat(x_preprocessed[i].reshape(1, -1), np.shape(x_preprocessed)[1], 0)\n + eps * np.eye(np.shape(x_preprocessed)[1])\n )[:, i_c]\n grads[i, i_c] = ((sur - ind) * eps).reshape(1, -1)\n\n grads = self._apply_preprocessing_gradient(x, grads)\n\n if label is not None:\n return grads[:, label, :].reshape(np.shape(x_preprocessed)[0], 1, np.shape(x_preprocessed)[1])\n\n return grads\n\n def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:\n \"\"\"\n Compute the gradient of the loss function w.r.t. 
`x`.\n\n :param x: Sample input with shape as expected by the model.\n :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape\n `(nb_samples,)`.\n :return: Array of gradients of the same shape as `x`.\n \"\"\"\n # Apply preprocessing\n x_preprocessed, _ = self._apply_preprocessing(x, y, fit=False)\n\n eps = 0.00001\n grads = np.zeros(np.shape(x))\n for i in range(np.shape(x)[0]):\n # 1.0 - to mimic loss, [0,np.argmax] to get right class\n ind = 1.0 - self.predict(x_preprocessed[i].reshape(1, -1))[0, np.argmax(y[i])]\n sur = (\n 1.0\n - self.predict(\n np.repeat(x_preprocessed[i].reshape(1, -1), np.shape(x_preprocessed)[1], 0)\n + eps * np.eye(np.shape(x_preprocessed)[1])\n )[:, np.argmax(y[i])]\n )\n grads[i] = ((sur - ind) * eps).reshape(1, -1)\n\n grads = self._apply_preprocessing_gradient(x, grads)\n\n return grads\n\n # pylint: disable=W0221\n def predict(self, x: np.ndarray, logits: bool = False, **kwargs) -> np.ndarray:\n \"\"\"\n Perform prediction for a batch of inputs.\n\n :param x: Test set.\n :param logits: `True` if the prediction should be done without squashing function.\n :return: Array of predictions of shape `(nb_inputs, nb_classes)`.\n \"\"\"\n # Apply preprocessing\n x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)\n\n # Perform prediction\n out = np.zeros((np.shape(x_preprocessed)[0], 2))\n if logits:\n # output the non-squashed version\n out[:, 0] = self.model.predict_noiseless(x_preprocessed)[0].reshape(-1)\n out[:, 1] = -1.0 * out[:, 0]\n else:\n # output normal prediction, scale up to two values\n out[:, 0] = self.model.predict(x_preprocessed)[0].reshape(-1)\n out[:, 1] = 1.0 - out[:, 0]\n\n # Apply postprocessing\n predictions = self._apply_postprocessing(preds=out, fit=False)\n\n return predictions\n\n def predict_uncertainty(self, x: np.ndarray) -> np.ndarray:\n \"\"\"\n Perform uncertainty prediction for a batch of inputs.\n\n :param x: Test set.\n :return: Array of uncertainty predictions of shape `(nb_inputs)`.\n \"\"\"\n # Apply preprocessing\n x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)\n\n # Perform prediction\n out = self.model.predict_noiseless(x_preprocessed)[1]\n\n # Apply postprocessing\n predictions = self._apply_postprocessing(preds=out, fit=False)\n\n return predictions\n\n def fit(self, x: np.ndarray, y: np.ndarray, **kwargs) -> None:\n \"\"\"\n Fit the classifier on the training set `(x, y)`.\n\n :param x: Training data. Not used, as given to model in initialized earlier.\n :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes).\n \"\"\"\n raise NotImplementedError\n\n def save(self, filename: str, path: Optional[str] = None) -> None:\n \"\"\"\n Save a model to file in the format specific to the backend framework.\n\n :param filename: Name of the file where to store the model.\n :param path: Path of the folder where to store the model. If no path is specified, the model will be stored in\n the default data location of the library `ART_DATA_PATH`.\n \"\"\"\n if path is None:\n full_path = os.path.join(config.ART_DATA_PATH, filename)\n else:\n full_path = os.path.join(path, filename)\n folder = os.path.split(full_path)[0]\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n self.model.save_model(full_path, save_data=False)\n",
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\n\nfrom keras_preprocessing.image import ImageDataGenerator\nimport numpy as np\n\nfrom art.data_generators import KerasDataGenerator\nfrom art.defences.detector.poison import ActivationDefence\nfrom art.utils import load_mnist\nfrom art.visualization import convert_to_rgb\n\nfrom tests.utils import master_seed\n\nlogger = logging.getLogger(__name__)\n\nNB_TRAIN, NB_TEST, BATCH_SIZE = 300, 10, 128\n\n\nclass TestActivationDefence(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n\n (x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()\n x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]\n cls.mnist = (x_train, y_train), (x_test, y_test), (min_, max_)\n\n # Create simple keras model\n import tensorflow as tf\n\n tf_version = [int(v) for v in tf.__version__.split(\".\")]\n if tf_version[0] == 2 and tf_version[1] >= 3:\n tf.compat.v1.disable_eager_execution()\n from tensorflow.keras import backend as k\n from tensorflow.keras.models import Sequential\n from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\n else:\n import keras.backend as k\n from keras.models import Sequential\n from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\n\n k.set_learning_phase(1)\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3), activation=\"relu\", input_shape=x_train.shape[1:]))\n model.add(MaxPooling2D(pool_size=(3, 3)))\n model.add(Flatten())\n model.add(Dense(10, activation=\"softmax\"))\n\n model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\n from art.estimators.classification.keras import KerasClassifier\n\n cls.classifier = KerasClassifier(model=model, clip_values=(min_, max_))\n\n cls.classifier.fit(x_train, y_train, nb_epochs=1, batch_size=128)\n\n cls.defence = ActivationDefence(cls.classifier, x_train, y_train)\n\n datagen = ImageDataGenerator()\n datagen.fit(x_train)\n\n data_gen = KerasDataGenerator(\n datagen.flow(x_train, y_train, batch_size=NB_TRAIN), size=NB_TRAIN, batch_size=NB_TRAIN\n )\n\n cls.defence_gen = ActivationDefence(cls.classifier, None, None, generator=data_gen)\n\n def setUp(self):\n # Set master seed\n master_seed(1234)\n\n @unittest.expectedFailure\n def test_wrong_parameters_1(self):\n 
self.defence.set_params(nb_clusters=0)\n\n @unittest.expectedFailure\n def test_wrong_parameters_2(self):\n self.defence.set_params(clustering_method=\"what\")\n\n @unittest.expectedFailure\n def test_wrong_parameters_3(self):\n self.defence.set_params(reduce=\"what\")\n\n @unittest.expectedFailure\n def test_wrong_parameters_4(self):\n self.defence.set_params(cluster_analysis=\"what\")\n\n def test_activations(self):\n (x_train, _), (_, _), (_, _) = self.mnist\n activations = self.defence._get_activations()\n self.assertEqual(len(x_train), len(activations))\n\n def test_output_clusters(self):\n # Get MNIST\n (x_train, _), (_, _), (_, _) = self.mnist\n\n n_classes = self.classifier.nb_classes\n for nb_clusters in range(2, 5):\n clusters_by_class, _ = self.defence.cluster_activations(nb_clusters=nb_clusters)\n\n # Verify expected number of classes\n self.assertEqual(np.shape(clusters_by_class)[0], n_classes)\n # Check we get the expected number of clusters:\n found_clusters = len(np.unique(clusters_by_class[0]))\n self.assertEqual(found_clusters, nb_clusters)\n # Check right amount of data\n n_dp = 0\n for i in range(0, n_classes):\n n_dp += len(clusters_by_class[i])\n self.assertEqual(len(x_train), n_dp)\n\n def test_detect_poison(self):\n # Get MNIST\n (x_train, _), (_, _), (_, _) = self.mnist\n\n _, is_clean_lst = self.defence.detect_poison(nb_clusters=2, nb_dims=10, reduce=\"PCA\")\n sum_clean1 = sum(is_clean_lst)\n\n _, is_clean_lst_gen = self.defence_gen.detect_poison(nb_clusters=2, nb_dims=10, reduce=\"PCA\")\n sum_clean1_gen = sum(is_clean_lst_gen)\n\n # Check number of items in is_clean\n self.assertEqual(len(x_train), len(is_clean_lst))\n self.assertEqual(len(x_train), len(is_clean_lst_gen))\n\n # Test right number of clusters\n found_clusters = len(np.unique(self.defence.clusters_by_class[0]))\n found_clusters_gen = len(np.unique(self.defence_gen.clusters_by_class[0]))\n self.assertEqual(found_clusters, 2)\n self.assertEqual(found_clusters_gen, 2)\n\n _, is_clean_lst = self.defence.detect_poison(\n nb_clusters=3, nb_dims=10, reduce=\"PCA\", cluster_analysis=\"distance\"\n )\n _, is_clean_lst_gen = self.defence_gen.detect_poison(\n nb_clusters=3, nb_dims=10, reduce=\"PCA\", cluster_analysis=\"distance\"\n )\n self.assertEqual(len(x_train), len(is_clean_lst))\n self.assertEqual(len(x_train), len(is_clean_lst_gen))\n\n # Test change of state to new number of clusters:\n found_clusters = len(np.unique(self.defence.clusters_by_class[0]))\n found_clusters_gen = len(np.unique(self.defence_gen.clusters_by_class[0]))\n self.assertEqual(found_clusters, 3)\n self.assertEqual(found_clusters_gen, 3)\n\n # Test clean data has changed\n sum_clean2 = sum(is_clean_lst)\n sum_clean2_gen = sum(is_clean_lst_gen)\n self.assertNotEqual(sum_clean1, sum_clean2)\n self.assertNotEqual(sum_clean1_gen, sum_clean2_gen)\n\n kwargs = {\"nb_clusters\": 2, \"nb_dims\": 10, \"reduce\": \"PCA\", \"cluster_analysis\": \"distance\"}\n _, is_clean_lst = self.defence.detect_poison(**kwargs)\n _, is_clean_lst_gen = self.defence_gen.detect_poison(**kwargs)\n sum_dist = sum(is_clean_lst)\n sum_dist_gen = sum(is_clean_lst_gen)\n kwargs = {\"nb_clusters\": 2, \"nb_dims\": 10, \"reduce\": \"PCA\", \"cluster_analysis\": \"smaller\"}\n _, is_clean_lst = self.defence.detect_poison(**kwargs)\n _, is_clean_lst_gen = self.defence_gen.detect_poison(**kwargs)\n sum_size = sum(is_clean_lst)\n sum_size_gen = sum(is_clean_lst_gen)\n self.assertNotEqual(sum_dist, sum_size)\n self.assertNotEqual(sum_dist_gen, sum_size_gen)\n\n def 
test_evaluate_defense(self):\n # Get MNIST\n (x_train, _), (_, _), (_, _) = self.mnist\n\n kwargs = {\"nb_clusters\": 2, \"nb_dims\": 10, \"reduce\": \"PCA\"}\n _, _ = self.defence.detect_poison(**kwargs)\n _, _ = self.defence_gen.detect_poison(**kwargs)\n is_clean = np.zeros(len(x_train))\n self.defence.evaluate_defence(is_clean)\n self.defence_gen.evaluate_defence(is_clean)\n\n def test_analyze_cluster(self):\n # Get MNIST\n (x_train, _), (_, _), (_, _) = self.mnist\n\n self.defence.analyze_clusters(cluster_analysis=\"relative-size\")\n self.defence_gen.analyze_clusters(cluster_analysis=\"relative-size\")\n\n self.defence.analyze_clusters(cluster_analysis=\"silhouette-scores\")\n self.defence_gen.analyze_clusters(cluster_analysis=\"silhouette-scores\")\n\n report, dist_clean_by_class = self.defence.analyze_clusters(cluster_analysis=\"distance\")\n report_gen, dist_clean_by_class_gen = self.defence_gen.analyze_clusters(cluster_analysis=\"distance\")\n n_classes = self.classifier.nb_classes\n self.assertEqual(n_classes, len(dist_clean_by_class))\n self.assertEqual(n_classes, len(dist_clean_by_class_gen))\n\n # Check right amount of data\n n_dp = 0\n n_dp_gen = 0\n for i in range(0, n_classes):\n n_dp += len(dist_clean_by_class[i])\n n_dp_gen += len(dist_clean_by_class_gen[i])\n self.assertEqual(len(x_train), n_dp)\n self.assertEqual(len(x_train), n_dp_gen)\n\n report, sz_clean_by_class = self.defence.analyze_clusters(cluster_analysis=\"smaller\")\n report_gen, sz_clean_by_class_gen = self.defence_gen.analyze_clusters(cluster_analysis=\"smaller\")\n n_classes = self.classifier.nb_classes\n self.assertEqual(n_classes, len(sz_clean_by_class))\n self.assertEqual(n_classes, len(sz_clean_by_class_gen))\n\n # Check right amount of data\n n_dp = 0\n n_dp_gen = 0\n sum_sz = 0\n sum_sz_gen = 0\n sum_dis = 0\n sum_dis_gen = 0\n\n for i in range(0, n_classes):\n n_dp += len(sz_clean_by_class[i])\n n_dp_gen += len(sz_clean_by_class_gen[i])\n sum_sz += sum(sz_clean_by_class[i])\n sum_sz_gen += sum(sz_clean_by_class_gen[i])\n sum_dis += sum(dist_clean_by_class[i])\n sum_dis_gen += sum(dist_clean_by_class_gen[i])\n self.assertEqual(len(x_train), n_dp)\n self.assertEqual(len(x_train), n_dp_gen)\n\n # Very unlikely that they are the same\n self.assertNotEqual(sum_dis, sum_sz, msg=\"This is very unlikely to happen... there may be an error\")\n self.assertNotEqual(sum_dis_gen, sum_sz_gen, msg=\"This is very unlikely to happen... 
there may be an error\")\n\n def test_plot_clusters(self):\n self.defence.detect_poison(nb_clusters=2, nb_dims=10, reduce=\"PCA\")\n self.defence_gen.detect_poison(nb_clusters=2, nb_dims=10, reduce=\"PCA\")\n self.defence.plot_clusters(save=False)\n self.defence_gen.plot_clusters(save=False)\n\n def test_pickle(self):\n\n # Test pickle and unpickle:\n filename = \"test_pickle.h5\"\n ActivationDefence._pickle_classifier(self.classifier, filename)\n loaded = ActivationDefence._unpickle_classifier(filename)\n\n np.testing.assert_equal(self.classifier._clip_values, loaded._clip_values)\n self.assertEqual(self.classifier._channels_first, loaded._channels_first)\n self.assertEqual(self.classifier._use_logits, loaded._use_logits)\n self.assertEqual(self.classifier._input_layer, loaded._input_layer)\n\n ActivationDefence._remove_pickle(filename)\n\n def test_fix_relabel_poison(self):\n (x_train, y_train), (_, _), (_, _) = self.mnist\n x_poison = x_train[:100]\n y_fix = y_train[:100]\n\n test_set_split = 0.7\n n_train = int(len(x_poison) * test_set_split)\n x_test = x_poison[n_train:]\n y_test = y_fix[n_train:]\n\n predictions = np.argmax(self.classifier.predict(x_test), axis=1)\n ini_miss = 1 - np.sum(predictions == np.argmax(y_test, axis=1)) / y_test.shape[0]\n\n improvement, new_classifier = ActivationDefence.relabel_poison_ground_truth(\n self.classifier,\n x_poison,\n y_fix,\n test_set_split=test_set_split,\n tolerable_backdoor=0.01,\n max_epochs=5,\n batch_epochs=10,\n )\n\n predictions = np.argmax(new_classifier.predict(x_test), axis=1)\n final_miss = 1 - np.sum(predictions == np.argmax(y_test, axis=1)) / y_test.shape[0]\n\n self.assertEqual(improvement, ini_miss - final_miss)\n\n # Other method (since it's cross validation we can't assert to a concrete number).\n improvement, _ = ActivationDefence.relabel_poison_cross_validation(\n self.classifier, x_poison, y_fix, n_splits=2, tolerable_backdoor=0.01, max_epochs=5, batch_epochs=10\n )\n self.assertGreaterEqual(improvement, 0)\n\n def test_visualizations(self):\n # test that visualization doesn't error in grayscale and RGB settings\n (x_train, _), (_, _), (_, _) = self.mnist\n self.defence.visualize_clusters(x_train)\n\n x_train_rgb = convert_to_rgb(x_train)\n self.defence.visualize_clusters(x_train_rgb)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.clip",
"numpy.reshape",
"numpy.power",
"numpy.linalg.norm",
"numpy.sign",
"numpy.random.rand",
"numpy.repeat",
"numpy.array",
"numpy.zeros"
],
[
"tensorflow.norm",
"numpy.expand_dims",
"torch.norm",
"numpy.clip",
"numpy.linalg.norm",
"numpy.copy",
"numpy.vstack"
],
[
"sklearn.metrics.recall_score",
"sklearn.metrics.confusion_matrix"
],
[
"numpy.asarray"
],
[
"scipy.stats.norm.fit",
"scipy.stats.norm.ppf",
"numpy.array",
"scipy.stats.entropy"
],
[
"numpy.amax",
"numpy.argmax",
"numpy.where",
"numpy.sum"
],
[
"numpy.swapaxes",
"numpy.abs",
"numpy.invert",
"numpy.ones",
"numpy.prod",
"numpy.argsort",
"numpy.zeros"
],
[
"numpy.argmax",
"numpy.shape"
],
[
"numpy.testing.assert_equal",
"tensorflow.__version__.split",
"numpy.unique",
"numpy.argmax",
"numpy.shape",
"tensorflow.compat.v1.disable_eager_execution"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
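The GPy classification wrapper in the row above estimates class and loss gradients numerically: each input feature is perturbed by `eps` and the perturbed prediction is compared against the unperturbed one. A minimal standalone sketch of that forward-difference idea (the `predict` function and numbers below are hypothetical; note that the textbook estimator divides by `eps`, whereas the snippet above scales the difference by `eps` instead):

import numpy as np

def finite_difference_grad(predict_fn, x, class_idx, eps=1e-4):
    # d predict_fn(x)[class_idx] / dx, one forward difference per input feature
    base = predict_fn(x.reshape(1, -1))[0, class_idx]
    perturbed = np.repeat(x.reshape(1, -1), x.size, axis=0) + eps * np.eye(x.size)
    shifted = predict_fn(perturbed)[:, class_idx]
    return (shifted - base) / eps

# toy binary "classifier": p(class 0) = x0 * x1, so the gradient at (2, 3) is (3, 2)
predict = lambda b: np.stack([b[:, 0] * b[:, 1], 1.0 - b[:, 0] * b[:, 1]], axis=1)
print(finite_difference_grad(predict, np.array([2.0, 3.0]), 0))  # approx [3. 2.]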
meta00/vital_sqi | [
"7e64a26c9d56af26bfbd25c3ba30211414f5f845"
] | [
"vital_sqi/sqi/dtw_sqi.py"
] | [
"import numpy as np\nimport sys\nimport os\nif bool(getattr(sys, 'ps1', sys.flags.interactive)):\n old_stdout = sys.stdout\n sys.stdout = open(os.devnull, 'w')\n from dtw import dtw\n sys.stdout = old_stdout\nelse:\n from dtw import dtw\n\nfrom vital_sqi.common.generate_template import (\n ppg_absolute_dual_skewness_template,\n ppg_dual_double_frequency_template,\n ppg_nonlinear_dynamic_system_template,\n ecg_dynamic_template\n )\nfrom vital_sqi.common.utils import check_valid_signal\nfrom scipy.spatial.distance import euclidean\n\ndef compute_dtw_distance(input_sequence, template_sequence):\n dtw_distances = np.ones((len(input_sequence),len(template_sequence))) * np.inf\n #first matching sample is set to zero\n dtw_distances[0,0] = 0\n for i in range(len(input_sequence)):\n for j in range(len(template_sequence)):\n cost = euclidean(input_sequence[i],template_sequence[j])\n # dtw_distances\n\n\n\ndef dtw_sqi(x, template_type=0):\n \"\"\"Using DTW to get the mapping point distance between a signal and its\n template. The DTW SQI is the ratio of the distance sum to\n the trace of cost matrix. The closer to 1 the better SQI.\n\n Parameters\n ----------\n x :\n array_like, signal containing int or float values.\n template_type :\n int,\n 0: ppg_absolute_dual_skewness_template,\n 1: ppg_dual_double_frequency_template,\n 2: ppg_nonlinear_dynamic_system_template,\n 3: ecg_dynamic_template\n default = 0\n\n Returns\n -------\n\n \"\"\"\n check_valid_signal(x)\n if template_type > 3 or type(template_type) != int:\n raise ValueError(\"Invalid template type\")\n if template_type == 0:\n reference = ppg_nonlinear_dynamic_system_template(len(x)).reshape(-1)\n elif template_type == 1:\n reference = ppg_dual_double_frequency_template(len(x))\n if template_type == 2:\n reference = ppg_absolute_dual_skewness_template(len(x))\n if template_type == 3:\n reference = ecg_dynamic_template(len(x))\n alignmentOBE = dtw(x, reference, keep_internals=True,\n step_pattern='asymmetric', open_end=True,\n open_begin=True)\n match_distance = []\n for i in range(len(alignmentOBE.index2)):\n match_distance.append(\n alignmentOBE.costMatrix[i][alignmentOBE.index2[i]]\n )\n trace = alignmentOBE.costMatrix.trace()\n if trace == 0:\n ratio = float(np.log(1))\n else:\n ratio = float(np.log(np.sum(match_distance)/trace))\n return ratio\n"
] | [
[
"numpy.log",
"numpy.sum",
"scipy.spatial.distance.euclidean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
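A toy walk-through of the log-ratio that `dtw_sqi` above returns, using a made-up 3x3 cost matrix in place of the `dtw` package's internals: when the matched distances coincide with the diagonal of the cost matrix, the ratio is 1 and the SQI is log(1) = 0.

import numpy as np

cost_matrix = np.array([[0.0, 1.0, 4.0],
                        [1.0, 0.5, 2.0],
                        [4.0, 2.0, 0.7]])
index2 = [0, 1, 2]  # template index matched to each signal sample
match_distance = [cost_matrix[i][index2[i]] for i in range(len(index2))]
trace = cost_matrix.trace()  # 0.0 + 0.5 + 0.7 = 1.2
print(float(np.log(np.sum(match_distance) / trace)))  # log(1.2 / 1.2) = 0.0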
QianLabUSC/cognitively-enhanced-decision-framework | [
"1797ddd41edcbfbfafca5b599ff7ab70f5fdc37f"
] | [
"rule_based_decision_making.py"
] | [
"# This FILE is part of multi-legged robot field exploration model\r\n# env_wrapper.py - to obtain user interaction data from website\r\n#\r\n# This programm is explained by roboLAND in university of southern california.\r\n# Please notify the source if you use it\r\n# \r\n# Copyright(c) 2021-2025 Ryoma Liu\r\n# Email: [email protected]\r\n\r\nfrom env_wrapper import *\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import optimize\r\nimport random\r\nimport matplotlib.pylab as pylab\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom math import *\r\n\r\nclass rule_state_machine:\r\n def __init__(self):\r\n '''Initial env info and parameters for decision making\r\n '''\r\n self.states = ['Initial', 'Exploration', 'Verification']\r\n self.current_state = 0\r\n self.env = ENV()\r\n self.hypo_locations = (['No','Feature_low','Feature_middle',\r\n 'Feature_high'])\r\n self.hypo_location = 0\r\n self.hypo_samples = (['No','Feature_low', 'Feature_middle',\r\n 'Feature_high'])\r\n self.hypo_sample = 0\r\n self.information_matrix = []\r\n self.accuracy_matrix = []\r\n self.fitting_error_matrix = []\r\n\r\n\r\n def set_init_hypo(self, hypo_location, hypo_sample):\r\n self.hypo_location = hypo_location\r\n self.hypo_sample = hypo_sample\r\n\r\n def choose_initial_template(self):\r\n '''choose initial template\r\n\r\n According to the initial knowledge and hypothesis, human will select a\r\n experience data sample distribution\r\n\r\n Args:\r\n self.hypo_location: inital hypo about data location feature\r\n self.hypo_sample : initial hypo about data sample feature\r\n\r\n Returns:\r\n change the initial template in env wrapper\r\n '''\r\n if(self.hypo_location == 0):\r\n location_index = [1,9,13,21]\r\n elif(self.hypo_location == 1):\r\n location_index = [1,4,7,11,16,21]\r\n elif(self.hypo_location == 2):\r\n location_index = [1,5,9,12,15,21]\r\n elif(self.hypo_location == 3):\r\n location_index = [1,6,11,14,17,20]\r\n if(self.hypo_sample == 0):\r\n sample_index = [3,3,3,3]\r\n elif(self.hypo_sample == 1):\r\n sample_index = [5,5,3,3,3,3]\r\n elif(self.hypo_sample == 2):\r\n sample_index = [3,3,5,5,3,3]\r\n elif(self.hypo_sample == 3):\r\n sample_index = [3,3,3,3,5,5]\r\n initial_action = [location_index, sample_index]\r\n self.env.initiate_template(initial_action)\r\n\r\n def handle_information_coverage(self):\r\n sample_state = self.env.get_state()\r\n sample_loc = np.array(sample_state[0])\r\n sample_number = np.array(sample_state[1])\r\n sort_index = np.argsort(sample_loc)\r\n sample_loc = sample_loc[sort_index]\r\n sample_number = sample_number[sort_index]\r\n unique_index = np.unique(sample_loc, return_index = True)\r\n sample_loc = sample_loc[unique_index[1]]\r\n sample_number = sample_number[unique_index[1]]\r\n sample_state = [sample_loc, sample_number]\r\n\r\n print(sample_state) \r\n self.information_matrix = np.zeros(22) #information matrix in location\r\n self.variable_coverage = np.zeros(20)\r\n for i in range(len(sample_state[0])):\r\n scale = 0.1 * sample_state[1][i] + 1\r\n locs = sample_state[0][i] + 1\r\n self.information_matrix += gauss(locs, scale)\r\n # print(self.information_matrix)\r\n # print(gauss(locs, scale))\r\n # self.plot_line('cool green', np.linspace(1,22,22), gauss(locs, scale), 'test'+str(i))\r\n # print(\"coverage_matrix: \", self.information_matrix)\r\n mm, erodi = self.env.get_data_state()\r\n mm_mean = np.mean(mm, axis=0)\r\n mm_nonzero = mm[np.nonzero(mm)]\r\n mm_mean_nonzero = mm_mean[np.nonzero(mm_mean)]\r\n start = 0 # 区间左端点\r\n 
number_of_interval = 20 # 区间个数\r\n length = 1 # 区间长度\r\n intervals = {'{}~{}'.format(length*x+start, length*(x+1)+start): 0 for x in range(number_of_interval)} # 生成区间\r\n result = np.array(interval_statistics(mm_nonzero, intervals))\r\n self.variable_coverage = len(result[(np.nonzero(result))])/len(result)\r\n result_number = np.linspace(0, 19, 20)\r\n variable_information = np.zeros(20)\r\n for i in range(len(result_number)):\r\n single_converage = gauss_variable(result_number[i] +0.5, result[i])\r\n variable_information += single_converage\r\n # feed the variable coverage into the previous belief\r\n self.variable_information = variable_information\r\n \r\n # print(mm_mean_nonzero)\r\n # print(sample_state[0])\r\n # p , e = optimize.curve_fit(piecewise_linear_moisture, np.array(sample_state[0])+1, mm_mean_nonzero)\r\n # xloc = np.linspace(1, 22, 22)\r\n # xmoisture = piecewise_linear_moisture(xloc, *p)\r\n # self.mapping_value = []\r\n # for emoisture in xmoisture:\r\n # self.mapping_value.append(variable_information[int(emoisture)])\r\n\r\n # print(variable_information)\r\n # print(self.mapping_value)\r\n # plt.plot(xloc,xmoisture )\r\n \r\n # plt.show()\r\n\r\n\r\n def handle_information_accuracy(self):\r\n accuracy_matrix = []\r\n mm, data_state = self.env.get_data_state()\r\n loc_state = self.env.get_state()\r\n # error_cost = np.std(data_state, axis=0)\r\n for col in range(data_state.shape[1]): \r\n if col in loc_state[0]:\r\n effective_data = data_state[:,col][np.nonzero(data_state[:,col])]\r\n # print(effective_data)\r\n median = np.median(effective_data) \r\n k1 = 1.4826\r\n mad = k1 * np.median(np.abs(effective_data-median))\r\n lower_limit = median - (3*mad)\r\n upper_limit = median + (3*mad)\r\n outlier_data_num = (len(effective_data[(effective_data> \r\n upper_limit) & (effective_data<lower_limit)]))\r\n data_samples = len(effective_data)\r\n if(data_samples == 0):\r\n total_cost = 0\r\n elif(data_samples > 0):\r\n total_cost = 1 - 1/(1+ (data_samples - 0.99)/(3*outlier_data_num + 1))\r\n accuracy_matrix.append(total_cost)\r\n else:\r\n accuracy_matrix.append(0)\r\n self.accuracy_matrix = accuracy_matrix\r\n # print('accuracy_matrix: ', self.accuracy_matrix)\r\n\r\n\r\n def handle_feature_point_detection(self):\r\n loc_state = self.env.get_state()[0]\r\n #print(self.env.get_state())\r\n self.fitting_error_matrix = np.zeros(22)\r\n mm, erodi = self.env.get_data_state()\r\n mm_mean = np.mean(mm, axis=0)\r\n mm_nonzeroindex = (mm_mean != 0)\r\n erodi_mean = np.mean(erodi, axis=0)\r\n self.loc_index = np.linspace(1,22,22)[mm_nonzeroindex]\r\n data_index = mm_mean[mm_nonzeroindex]\r\n data_mean = erodi_mean[mm_nonzeroindex]\r\n p , e = optimize.curve_fit(piecewise_linear, data_index, data_mean)\r\n # print('dfadfaaf', p)\r\n xd = np.linspace(0, np.max(data_index), 22)\r\n fit_curve = piecewise_linear(xd, *p)\r\n fitting_results = piecewise_linear(data_index, *p)\r\n self.fitting_results = fitting_results\r\n fitting_error = fitting_results - data_mean\r\n mm_mean[mm_nonzeroindex] = fitting_error\r\n self.data_index = data_index\r\n self.fitting_error_matrix[mm_nonzeroindex] = fitting_error\r\n\r\n # print(data_mean)\r\n nonzero_data_mean = data_mean[np.nonzero(data_mean != 0)]\r\n rmse_data = (sqrt(np.sum(np.power(nonzero_data_mean, 2))/\r\n np.size(nonzero_data_mean)))\r\n # print(rmse_data)\r\n self.rmse_data = rmse_data\r\n # plt.plot(xd, fit_curve)\r\n # plt.plot(data_index, data_mean, \"o\")\r\n # plt.plot(data_index, fitting_results, \"*\")\r\n # #plt.plot(data_index, 
fitting_error, \"+\")\r\n # plt.show()\r\n # plt.savefig('123.png')\r\n\r\n\r\n # find the feature point location\r\n array = np.asarray(data_index)\r\n idx = (np.abs(array - p[0])).argmin()\r\n loc_indx = loc_state[idx]\r\n saturation_estimated = int(loc_indx * (p[0]/array[idx]))\r\n self.saturation_selection = np.arange(saturation_estimated - 2, saturation_estimated + 3, 1)\r\n \r\n\r\n def confidence_model(self):\r\n non_zero_matrix = (self.fitting_error_matrix[np.nonzero\r\n (self.fitting_error_matrix != 0)])\r\n rmse = (sqrt(np.sum(np.power(non_zero_matrix, 2))/\r\n np.size(non_zero_matrix)))\r\n # print(rmse)\r\n # print(self.fitting_error_matrix)\r\n # print(non_zero_matrix)\r\n whole_rmse_percentage = rmse/self.rmse_data\r\n # print(whole_rmse_percentage)\r\n confindence = (0.04 - whole_rmse_percentage) * 30 * self.coverage_criteria\r\n # print(confindence)\r\n\r\n def handle_state_judge(self):\r\n if(self.current_state == 0):\r\n self.current_state = 1\r\n elif(self.current_state == 1):\r\n if(np.min(self.accuracy_matrix) > 0.7 and \r\n len(self.information_matrix[self.information_matrix > 0.8]) > 15):\r\n self.current_state = 2\r\n else: \r\n self.current_state = 1\r\n elif(self.current_state == 2):\r\n if(len(self.fitting_error_matrix[self.fitting_error_matrix > 0.8]) > 0):\r\n self.current_state = 1\r\n elif():\r\n self.current_state = 2\r\n \r\n def information_model(self):\r\n self.coverage_criteria = (len(self.information_matrix[self.information_matrix\r\n > 0.3]) / 22)\r\n accuracy_matrix = np.array(self.accuracy_matrix)\r\n # print(accuracy_matrix)\r\n self.accuracy_criteria = (len(accuracy_matrix[(accuracy_matrix > 0.6) & (accuracy_matrix != 0)]) /\r\n len(accuracy_matrix[accuracy_matrix != 0]))\r\n \r\n # print('accuracy_value:', self.accuracy_criteria) # percentage of locs which the accuracy is lower than 0.6\r\n # print('coverage_value:', self.coverage_criteria) # percentage of locs which the information is lower than 0.8\r\n \r\n\r\n def take_action(self):\r\n if(self.current_state == 0):\r\n self.choose_initial_template()\r\n elif(self.current_state == 1):\r\n action_loc = np.argmin(self.information_matrix)\r\n self.env.set_action([action_loc],[3])\r\n accuracy_loc = np.where(self.accuracy_matrix < 0.7)\r\n accuracy_samples = np.ones(len(accuracy_loc))\r\n self.env.set_action(accuracy_loc,accuracy_samples) \r\n elif(self.current_state == 2):\r\n fitting_error_loc = np.where(self.fitting_error_matrix > 0.8)\r\n add_loc = []\r\n add_samples = []\r\n current_state = self.env.get_state()\r\n for i in fitting_error_loc:\r\n if not i+1 in current_state[0]:\r\n add_loc.append(i+1)\r\n add_samples.append(3)\r\n if not i-1 in current_state[0]:\r\n add_loc.append(i-1)\r\n add_samples.append(3)\r\n self.env.set_action(add_loc, add_samples)\r\n\r\n def plot(self, color, name):\r\n myparams = {\r\n\r\n 'axes.labelsize': '10',\r\n\r\n 'xtick.labelsize': '10',\r\n\r\n 'ytick.labelsize': '10',\r\n\r\n 'lines.linewidth': 1,\r\n\r\n 'legend.fontsize': '3',\r\n\r\n 'font.family': 'Times New Roman',\r\n\r\n 'figure.figsize': '9, 5' #图片尺寸\r\n\r\n }\r\n\r\n pylab.rcParams.update(myparams) #更新自己的设置\r\n # line_styles=['ro-','b^-','gs-','ro--','b^--','gs--'] #线型设置\r\n \r\n fig1 = plt.figure(1)\r\n a = plt.plot(self.coverage_criteria, self.accuracy_criteria ,marker='o', color=sns.xkcd_rgb[color],\r\n markersize=5)\r\n \r\n plt.legend(loc=\"lower right\") #图例位置 右下角\r\n plt.ylabel('accuracy') \r\n plt.xlabel('coverage ') \r\n plt.xlim((0, 1.1))\r\n plt.ylim((0, 1.1))\r\n plt.axvline(x=1, 
c=\"b\", ls=\"--\", lw=1)\r\n plt.axhline(y=1, c=\"b\", ls=\"--\", lw=1)\r\n plt.savefig(name)\r\n\r\n #注意.show()操作后会默认打开一个空白fig,此时保存,容易出现保存的为纯白背景,所以请在show()操作前保存fig.\r\n # plt.show()\r\n\r\ndef interval_statistics(data, intervals):\r\n if len(data) == 0:\r\n return\r\n for num in data:\r\n for interval in intervals:\r\n lr = tuple(interval.split('~'))\r\n left, right = float(lr[0]), float(lr[1])\r\n if left <= num <= right:\r\n intervals[interval] += 1\r\n results = []\r\n for key, value in intervals.items():\r\n #print(\"%10s\" % key, end='') # 借助 end=''可以不换行\r\n # print(\"%10s\" % value, end='') # \"%10s\" 右对齐\r\n #print('%16s' % '{:.3%}'.format(value * 1.0 / len(data)))\r\n results.append(value)\r\n return results\r\n\r\n\r\ndef piecewise_linear(x, x0, y0, k1):\r\n\t# x<x0 ⇒ lambda x: k1*x + y0 - k1*x0\r\n\t# x>=x0 ⇒ lambda x: k2*x + y0 - k2*x0\r\n return np.piecewise(x, [x < x0, x >= x0], [lambda x:k1*x + y0-k1*x0, \r\n lambda x: y0])\r\ndef piecewise_linear_moisture(x, x0, y0, k1, k2):\r\n\t# x<x0 ⇒ lambda x: k1*x + y0 - k1*x0\r\n\t# x>=x0 ⇒ lambda x: k2*x + y0 - k2*x0\r\n return np.piecewise(x, [x < x0, x >= x0], [lambda x:k1*x + y0-k1*x0, \r\n lambda x: k2*x + y0 - k2*x0])\r\ndef gauss(mean, scale, x=np.linspace(1,22,22), sigma=1):\r\n return scale * np.exp(-np.square(x - mean) / (2 * sigma ** 2))\r\ndef gauss_variable(mean, scale, x=np.linspace(0,19,20), sigma=1):\r\n return scale * np.exp(-np.square(x - mean) / (2 * sigma ** 2))\r\nif __name__ == \"__main__\":\r\n DM = rule_state_machine()\r\n DM.choose_initial_template()\r\n # x = np.linspace(1,22,22)\r\n # information_matrix = gauss(1,0.1).reshape(22,1)\r\n # print(information_matrix)\r\n # sns.set()\r\n # ax = sns.heatmap(information_matrix, vmin=0, vmax=1)\r\n # plt.title('Information Matrix')\r\n # plt.savefig(\"test.png\") \r\n DM.handle_information_accuracy()\r\n DM.handle_information_coverage()\r\n DM.information_model()\r\n DM.plot('cool green','test') \r\n DM.handle_feature_point_detection()\r\n DM.confidence_model()\r\n "
] | [
[
"matplotlib.pyplot.legend",
"numpy.linspace",
"numpy.asarray",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.mean",
"numpy.argmin",
"scipy.optimize.curve_fit",
"numpy.where",
"numpy.square",
"matplotlib.pylab.rcParams.update",
"numpy.unique",
"numpy.arange",
"numpy.size",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.nonzero",
"numpy.power",
"numpy.min",
"matplotlib.pyplot.ylim",
"numpy.median",
"matplotlib.pyplot.savefig",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.axhline",
"numpy.abs",
"numpy.piecewise",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
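The coverage bookkeeping in `handle_information_coverage` above boils down to summing one Gaussian bump per sampled location, with the bump height growing with the number of samples taken there. A standalone sketch with hypothetical sample data:

import numpy as np

def gauss(mean, scale, x=np.linspace(1, 22, 22), sigma=1):
    return scale * np.exp(-np.square(x - mean) / (2 * sigma ** 2))

sample_locs = np.array([1, 9, 13, 21])  # hypothetical 1-based sampling locations
sample_counts = np.array([3, 3, 3, 3])  # samples taken at each location
information_matrix = np.zeros(22)
for loc, n in zip(sample_locs, sample_counts):
    information_matrix += gauss(loc, 0.1 * n + 1)  # scale = 0.1 * n + 1, as in the source
print(len(information_matrix[information_matrix > 0.3]) / 22)  # coverage criterion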
GT-AcerZhang/yolov3.insects_challenge | [
"1ac6ee5a8a5c534ec11723542f4c10583935a2ad"
] | [
"train.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport time\nimport os\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.dygraph.base import to_variable\n\nfrom reader import data_loader, test_data_loader, multithread_loader\nfrom yolov3 import YOLOv3\n\n# train.py\n# 提升点: 可以改变anchor的大小,注意训练和测试时要使用同样的anchor\nANCHORS = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]\n\nANCHOR_MASKS = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]\n\nIGNORE_THRESH = .7\nNUM_CLASSES = 7\n\nTRAINDIR = 'insects/train'\nVALIDDIR = 'insects/val'\n\n# train.py\nif __name__ == '__main__':\n with fluid.dygraph.guard():\n model = YOLOv3('yolov3', num_classes = NUM_CLASSES, is_train=True)\n opt = fluid.optimizer.Momentum(\n learning_rate=0.001, #提升点:可以调整学习率,或者设置学习率衰减\n momentum=0.9) # 提升点: 可以添加正则化项\n\n train_loader = multithread_loader(TRAINDIR, batch_size= 10, mode='train')\n valid_loader = multithread_loader(VALIDDIR, batch_size= 10, mode='valid')\n\n MAX_EPOCH = 100 # 提升点: 可以改变训练的轮数\n for epoch in range(MAX_EPOCH):\n for i, data in enumerate(train_loader()):\n img, gt_boxes, gt_labels, img_scale = data\n gt_scores = np.ones(gt_labels.shape).astype('float32')\n gt_scores = to_variable(gt_scores)\n img = to_variable(img)\n gt_boxes = to_variable(gt_boxes)\n gt_labels = to_variable(gt_labels)\n outputs = model(img)\n loss = model.get_loss(outputs, gt_boxes, gt_labels, gtscore=gt_scores,\n anchors = ANCHORS,\n anchor_masks = ANCHOR_MASKS,\n ignore_thresh=IGNORE_THRESH,\n use_label_smooth=False)\n\n loss.backward()\n opt.minimize(loss)\n model.clear_gradients()\n if i % 1 == 0:\n timestring = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(time.time()))\n print('{}[TRAIN]epoch {}, iter {}, output loss: {}'.format(timestring, epoch, i, loss.numpy()))\n\n # save params of model\n if (epoch % 5 == 0) or (epoch == MAX_EPOCH -1):\n fluid.save_dygraph(model.state_dict(), 'yolo_epoch{}'.format(epoch))\n \n # 每个epoch结束之后在验证集上进行测试\n model.eval()\n for i, data in enumerate(valid_loader()):\n img, gt_boxes, gt_labels, img_scale = data\n gt_scores = np.ones(gt_labels.shape).astype('float32')\n gt_scores = to_variable(gt_scores)\n img = to_variable(img)\n gt_boxes = to_variable(gt_boxes)\n gt_labels = to_variable(gt_labels)\n outputs = model(img)\n loss = model.get_loss(outputs, gt_boxes, gt_labels, gtscore=gt_scores,\n anchors = ANCHORS,\n anchor_masks = ANCHOR_MASKS,\n ignore_thresh=IGNORE_THRESH,\n use_label_smooth=False)\n if i % 1 == 0:\n timestring = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(time.time()))\n print('{}[VALID]epoch {}, iter {}, output loss: {}'.format(timestring, epoch, i, loss.numpy()))\n model.train()\n\n\n"
] | [
[
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
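In the training script above, the flat ANCHORS list encodes nine (width, height) pairs and ANCHOR_MASKS assigns three of them to each detection level, with the largest anchors on the coarsest feature map. A quick way to see the grouping:

ANCHORS = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
ANCHOR_MASKS = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]

pairs = [tuple(ANCHORS[2 * i:2 * i + 2]) for i in range(len(ANCHORS) // 2)]
for level, mask in enumerate(ANCHOR_MASKS):
    print("level", level, "->", [pairs[m] for m in mask])
# level 0 -> [(116, 90), (156, 198), (373, 326)]  (coarsest map, largest boxes)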
ShanuDey/tf-slim | [
"19c840abfa6de567d760254c42ea68760cf5d9f0"
] | [
"tf_slim/nets/vgg_test.py"
] | [
"# coding=utf-8\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for slim.nets.vgg.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tf_slim.nets import vgg\nfrom tf_slim.ops import variables as variables_lib\n# pylint:disable=g-direct-tensorflow-import\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n# pylint:enable=g-direct-tensorflow-import\n\n\nclass VGGATest(test.TestCase):\n\n def testBuild(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_a(inputs, num_classes)\n self.assertEquals(logits.op.name, 'vgg_a/fc8/squeezed')\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n\n def testFullyConvolutional(self):\n batch_size = 1\n height, width = 256, 256\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False)\n self.assertEquals(logits.op.name, 'vgg_a/fc8/BiasAdd')\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, 2, 2, num_classes])\n\n def testEndPoints(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n for is_training in [True, False]:\n with ops.Graph().as_default():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n _, end_points = vgg.vgg_a(inputs, num_classes, is_training=is_training)\n expected_names = [\n 'vgg_a/conv1/conv1_1', 'vgg_a/pool1', 'vgg_a/conv2/conv2_1',\n 'vgg_a/pool2', 'vgg_a/conv3/conv3_1', 'vgg_a/conv3/conv3_2',\n 'vgg_a/pool3', 'vgg_a/conv4/conv4_1', 'vgg_a/conv4/conv4_2',\n 'vgg_a/pool4', 'vgg_a/conv5/conv5_1', 'vgg_a/conv5/conv5_2',\n 'vgg_a/pool5', 'vgg_a/fc6', 'vgg_a/fc7', 'vgg_a/fc8'\n ]\n self.assertSetEqual(set(end_points.keys()), set(expected_names))\n\n def testModelVariables(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n vgg.vgg_a(inputs, num_classes)\n expected_names = [\n 'vgg_a/conv1/conv1_1/weights',\n 'vgg_a/conv1/conv1_1/biases',\n 'vgg_a/conv2/conv2_1/weights',\n 'vgg_a/conv2/conv2_1/biases',\n 'vgg_a/conv3/conv3_1/weights',\n 'vgg_a/conv3/conv3_1/biases',\n 'vgg_a/conv3/conv3_2/weights',\n 'vgg_a/conv3/conv3_2/biases',\n 'vgg_a/conv4/conv4_1/weights',\n 'vgg_a/conv4/conv4_1/biases',\n 'vgg_a/conv4/conv4_2/weights',\n 'vgg_a/conv4/conv4_2/biases',\n 'vgg_a/conv5/conv5_1/weights',\n 
'vgg_a/conv5/conv5_1/biases',\n 'vgg_a/conv5/conv5_2/weights',\n 'vgg_a/conv5/conv5_2/biases',\n 'vgg_a/fc6/weights',\n 'vgg_a/fc6/biases',\n 'vgg_a/fc7/weights',\n 'vgg_a/fc7/biases',\n 'vgg_a/fc8/weights',\n 'vgg_a/fc8/biases',\n ]\n model_variables = [v.op.name for v in variables_lib.get_model_variables()]\n self.assertSetEqual(set(model_variables), set(expected_names))\n\n def testEvaluation(self):\n batch_size = 2\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_a(eval_inputs, is_training=False)\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n predictions = math_ops.argmax(logits, 1)\n self.assertListEqual(predictions.get_shape().as_list(), [batch_size])\n\n def testTrainEvalWithReuse(self):\n train_batch_size = 2\n eval_batch_size = 1\n train_height, train_width = 224, 224\n eval_height, eval_width = 256, 256\n num_classes = 1000\n with self.cached_session():\n train_inputs = random_ops.random_uniform(\n (train_batch_size, train_height, train_width, 3))\n logits, _ = vgg.vgg_a(train_inputs)\n self.assertListEqual(logits.get_shape().as_list(),\n [train_batch_size, num_classes])\n variable_scope.get_variable_scope().reuse_variables()\n eval_inputs = random_ops.random_uniform(\n (eval_batch_size, eval_height, eval_width, 3))\n logits, _ = vgg.vgg_a(\n eval_inputs, is_training=False, spatial_squeeze=False)\n self.assertListEqual(logits.get_shape().as_list(),\n [eval_batch_size, 2, 2, num_classes])\n logits = math_ops.reduce_mean(logits, [1, 2])\n predictions = math_ops.argmax(logits, 1)\n self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])\n\n def testForward(self):\n batch_size = 1\n height, width = 224, 224\n with self.cached_session() as sess:\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_a(inputs)\n sess.run(variables.global_variables_initializer())\n output = sess.run(logits)\n self.assertTrue(output.any())\n\n\nclass VGG16Test(test.TestCase):\n\n def testBuild(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_16(inputs, num_classes)\n self.assertEquals(logits.op.name, 'vgg_16/fc8/squeezed')\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n\n def testFullyConvolutional(self):\n batch_size = 1\n height, width = 256, 256\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_16(inputs, num_classes, spatial_squeeze=False)\n self.assertEquals(logits.op.name, 'vgg_16/fc8/BiasAdd')\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, 2, 2, num_classes])\n\n def testEndPoints(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n for is_training in [True, False]:\n with ops.Graph().as_default():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n _, end_points = vgg.vgg_16(inputs, num_classes, is_training=is_training)\n expected_names = [\n 'vgg_16/conv1/conv1_1', 'vgg_16/conv1/conv1_2', 'vgg_16/pool1',\n 'vgg_16/conv2/conv2_1', 'vgg_16/conv2/conv2_2', 'vgg_16/pool2',\n 'vgg_16/conv3/conv3_1', 'vgg_16/conv3/conv3_2',\n 'vgg_16/conv3/conv3_3', 'vgg_16/pool3', 'vgg_16/conv4/conv4_1',\n 'vgg_16/conv4/conv4_2', 'vgg_16/conv4/conv4_3', 'vgg_16/pool4',\n 
'vgg_16/conv5/conv5_1', 'vgg_16/conv5/conv5_2',\n 'vgg_16/conv5/conv5_3', 'vgg_16/pool5', 'vgg_16/fc6', 'vgg_16/fc7',\n 'vgg_16/fc8'\n ]\n self.assertSetEqual(set(end_points.keys()), set(expected_names))\n\n def testModelVariables(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n vgg.vgg_16(inputs, num_classes)\n expected_names = [\n 'vgg_16/conv1/conv1_1/weights',\n 'vgg_16/conv1/conv1_1/biases',\n 'vgg_16/conv1/conv1_2/weights',\n 'vgg_16/conv1/conv1_2/biases',\n 'vgg_16/conv2/conv2_1/weights',\n 'vgg_16/conv2/conv2_1/biases',\n 'vgg_16/conv2/conv2_2/weights',\n 'vgg_16/conv2/conv2_2/biases',\n 'vgg_16/conv3/conv3_1/weights',\n 'vgg_16/conv3/conv3_1/biases',\n 'vgg_16/conv3/conv3_2/weights',\n 'vgg_16/conv3/conv3_2/biases',\n 'vgg_16/conv3/conv3_3/weights',\n 'vgg_16/conv3/conv3_3/biases',\n 'vgg_16/conv4/conv4_1/weights',\n 'vgg_16/conv4/conv4_1/biases',\n 'vgg_16/conv4/conv4_2/weights',\n 'vgg_16/conv4/conv4_2/biases',\n 'vgg_16/conv4/conv4_3/weights',\n 'vgg_16/conv4/conv4_3/biases',\n 'vgg_16/conv5/conv5_1/weights',\n 'vgg_16/conv5/conv5_1/biases',\n 'vgg_16/conv5/conv5_2/weights',\n 'vgg_16/conv5/conv5_2/biases',\n 'vgg_16/conv5/conv5_3/weights',\n 'vgg_16/conv5/conv5_3/biases',\n 'vgg_16/fc6/weights',\n 'vgg_16/fc6/biases',\n 'vgg_16/fc7/weights',\n 'vgg_16/fc7/biases',\n 'vgg_16/fc8/weights',\n 'vgg_16/fc8/biases',\n ]\n model_variables = [v.op.name for v in variables_lib.get_model_variables()]\n self.assertSetEqual(set(model_variables), set(expected_names))\n\n def testEvaluation(self):\n batch_size = 2\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_16(eval_inputs, is_training=False)\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n predictions = math_ops.argmax(logits, 1)\n self.assertListEqual(predictions.get_shape().as_list(), [batch_size])\n\n def testTrainEvalWithReuse(self):\n train_batch_size = 2\n eval_batch_size = 1\n train_height, train_width = 224, 224\n eval_height, eval_width = 256, 256\n num_classes = 1000\n with self.cached_session():\n train_inputs = random_ops.random_uniform(\n (train_batch_size, train_height, train_width, 3))\n logits, _ = vgg.vgg_16(train_inputs)\n self.assertListEqual(logits.get_shape().as_list(),\n [train_batch_size, num_classes])\n variable_scope.get_variable_scope().reuse_variables()\n eval_inputs = random_ops.random_uniform(\n (eval_batch_size, eval_height, eval_width, 3))\n logits, _ = vgg.vgg_16(\n eval_inputs, is_training=False, spatial_squeeze=False)\n self.assertListEqual(logits.get_shape().as_list(),\n [eval_batch_size, 2, 2, num_classes])\n logits = math_ops.reduce_mean(logits, [1, 2])\n predictions = math_ops.argmax(logits, 1)\n self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])\n\n def testForward(self):\n batch_size = 1\n height, width = 224, 224\n with self.cached_session() as sess:\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_16(inputs)\n sess.run(variables.global_variables_initializer())\n output = sess.run(logits)\n self.assertTrue(output.any())\n\n\nclass VGG19Test(test.TestCase):\n\n def testBuild(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = 
vgg.vgg_19(inputs, num_classes)\n self.assertEquals(logits.op.name, 'vgg_19/fc8/squeezed')\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n\n def testFullyConvolutional(self):\n batch_size = 1\n height, width = 256, 256\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_19(inputs, num_classes, spatial_squeeze=False)\n self.assertEquals(logits.op.name, 'vgg_19/fc8/BiasAdd')\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, 2, 2, num_classes])\n\n def testEndPoints(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n for is_training in [True, False]:\n with ops.Graph().as_default():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n _, end_points = vgg.vgg_19(inputs, num_classes, is_training=is_training)\n expected_names = [\n 'vgg_19/conv1/conv1_1', 'vgg_19/conv1/conv1_2', 'vgg_19/pool1',\n 'vgg_19/conv2/conv2_1', 'vgg_19/conv2/conv2_2', 'vgg_19/pool2',\n 'vgg_19/conv3/conv3_1', 'vgg_19/conv3/conv3_2',\n 'vgg_19/conv3/conv3_3', 'vgg_19/conv3/conv3_4', 'vgg_19/pool3',\n 'vgg_19/conv4/conv4_1', 'vgg_19/conv4/conv4_2',\n 'vgg_19/conv4/conv4_3', 'vgg_19/conv4/conv4_4', 'vgg_19/pool4',\n 'vgg_19/conv5/conv5_1', 'vgg_19/conv5/conv5_2',\n 'vgg_19/conv5/conv5_3', 'vgg_19/conv5/conv5_4', 'vgg_19/pool5',\n 'vgg_19/fc6', 'vgg_19/fc7', 'vgg_19/fc8'\n ]\n self.assertSetEqual(set(end_points.keys()), set(expected_names))\n\n def testModelVariables(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n vgg.vgg_19(inputs, num_classes)\n expected_names = [\n 'vgg_19/conv1/conv1_1/weights',\n 'vgg_19/conv1/conv1_1/biases',\n 'vgg_19/conv1/conv1_2/weights',\n 'vgg_19/conv1/conv1_2/biases',\n 'vgg_19/conv2/conv2_1/weights',\n 'vgg_19/conv2/conv2_1/biases',\n 'vgg_19/conv2/conv2_2/weights',\n 'vgg_19/conv2/conv2_2/biases',\n 'vgg_19/conv3/conv3_1/weights',\n 'vgg_19/conv3/conv3_1/biases',\n 'vgg_19/conv3/conv3_2/weights',\n 'vgg_19/conv3/conv3_2/biases',\n 'vgg_19/conv3/conv3_3/weights',\n 'vgg_19/conv3/conv3_3/biases',\n 'vgg_19/conv3/conv3_4/weights',\n 'vgg_19/conv3/conv3_4/biases',\n 'vgg_19/conv4/conv4_1/weights',\n 'vgg_19/conv4/conv4_1/biases',\n 'vgg_19/conv4/conv4_2/weights',\n 'vgg_19/conv4/conv4_2/biases',\n 'vgg_19/conv4/conv4_3/weights',\n 'vgg_19/conv4/conv4_3/biases',\n 'vgg_19/conv4/conv4_4/weights',\n 'vgg_19/conv4/conv4_4/biases',\n 'vgg_19/conv5/conv5_1/weights',\n 'vgg_19/conv5/conv5_1/biases',\n 'vgg_19/conv5/conv5_2/weights',\n 'vgg_19/conv5/conv5_2/biases',\n 'vgg_19/conv5/conv5_3/weights',\n 'vgg_19/conv5/conv5_3/biases',\n 'vgg_19/conv5/conv5_4/weights',\n 'vgg_19/conv5/conv5_4/biases',\n 'vgg_19/fc6/weights',\n 'vgg_19/fc6/biases',\n 'vgg_19/fc7/weights',\n 'vgg_19/fc7/biases',\n 'vgg_19/fc8/weights',\n 'vgg_19/fc8/biases',\n ]\n model_variables = [v.op.name for v in variables_lib.get_model_variables()]\n self.assertSetEqual(set(model_variables), set(expected_names))\n\n def testEvaluation(self):\n batch_size = 2\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_19(eval_inputs, is_training=False)\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n predictions = math_ops.argmax(logits, 1)\n 
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])\n\n def testTrainEvalWithReuse(self):\n train_batch_size = 2\n eval_batch_size = 1\n train_height, train_width = 224, 224\n eval_height, eval_width = 256, 256\n num_classes = 1000\n with self.cached_session():\n train_inputs = random_ops.random_uniform(\n (train_batch_size, train_height, train_width, 3))\n logits, _ = vgg.vgg_19(train_inputs)\n self.assertListEqual(logits.get_shape().as_list(),\n [train_batch_size, num_classes])\n variable_scope.get_variable_scope().reuse_variables()\n eval_inputs = random_ops.random_uniform(\n (eval_batch_size, eval_height, eval_width, 3))\n logits, _ = vgg.vgg_19(\n eval_inputs, is_training=False, spatial_squeeze=False)\n self.assertListEqual(logits.get_shape().as_list(),\n [eval_batch_size, 2, 2, num_classes])\n logits = math_ops.reduce_mean(logits, [1, 2])\n predictions = math_ops.argmax(logits, 1)\n self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])\n\n def testForward(self):\n batch_size = 1\n height, width = 224, 224\n with self.cached_session() as sess:\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_19(inputs)\n sess.run(variables.global_variables_initializer())\n output = sess.run(logits)\n self.assertTrue(output.any())\n\n\nif __name__ == '__main__':\n test.main()\n"
] | [
[
"tensorflow.python.ops.math_ops.argmax",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.random_ops.random_uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |
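Why `testFullyConvolutional` above expects logits of shape (batch, 2, 2, num_classes) for 256x256 inputs, assuming the standard slim VGG layout (five stride-2 max-pools, then fc6 as a 7x7 VALID convolution and 1x1 fc7/fc8): a back-of-the-envelope check.

size = 256
for _ in range(5):   # five 2x2 max-pools with stride 2
    size //= 2       # 256 -> 128 -> 64 -> 32 -> 16 -> 8
size = size - 7 + 1  # fc6 as a 7x7 convolution with VALID padding
print(size)          # 2, hence the 2x2 map; 224 inputs give 1x1, which gets squeezed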
pedrob37/PhysicsPyTorch | [
"a892dfe89740b6fa75d3de5319f99d41bcf4ca63",
"a892dfe89740b6fa75d3de5319f99d41bcf4ca63",
"a892dfe89740b6fa75d3de5319f99d41bcf4ca63"
] | [
"ponai/data/grid_dataset.py",
"ponai/transforms/post/array.py",
"ponai/transforms/intensity/array.py"
] | [
"# Copyright 2020 ponai Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nfrom typing import Union\n\nimport torch\nfrom torch.utils.data import IterableDataset\n\nfrom ponai.data.utils import iter_patch\nfrom ponai.utils import NumpyPadMode\n\n\nclass GridPatchDataset(IterableDataset):\n \"\"\"\n Yields patches from arrays read from an input dataset. The patches are chosen in a contiguous grid sampling scheme.\n \"\"\"\n\n def __init__(\n self, dataset, patch_size, start_pos=(), mode: Union[NumpyPadMode, str] = NumpyPadMode.WRAP, **pad_opts\n ):\n \"\"\"\n Initializes this dataset in terms of the input dataset and patch size. The `patch_size` is the size of the\n patch to sample from the input arrays. It is assumed the arrays first dimension is the channel dimension which\n will be yielded in its entirety so this should not be specified in `patch_size`. For example, for an input 3D\n array with 1 channel of size (1, 20, 20, 20) a regular grid sampling of eight patches (1, 10, 10, 10) would be\n specified by a `patch_size` of (10, 10, 10).\n\n Args:\n dataset (Dataset): the dataset to read array data from\n patch_size (tuple of int or None): size of patches to generate slices for, 0/None selects whole dimension\n start_pos (tuple of it, optional): starting position in the array, default is 0 for each dimension\n mode: {``\"constant\"``, ``\"edge\"``, ``\"linear_ramp\"``, ``\"maximum\"``, ``\"mean\"``,\n ``\"median\"``, ``\"minimum\"``, ``\"reflect\"``, ``\"symmetric\"``, ``\"wrap\"``, ``\"empty\"``}\n One of the listed string values or a user supplied function. Defaults to ``\"wrap\"``.\n See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html\n pad_opts (dict, optional): padding options, see numpy.pad\n \"\"\"\n\n self.dataset = dataset\n self.patch_size = (None,) + tuple(patch_size)\n self.start_pos = start_pos\n self.mode: NumpyPadMode = NumpyPadMode(mode)\n self.pad_opts = pad_opts\n\n def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n iter_start = 0\n iter_end = len(self.dataset)\n\n if worker_info is not None:\n # split workload\n per_worker = int(math.ceil((iter_end - iter_start) / float(worker_info.num_workers)))\n worker_id = worker_info.id\n iter_start = iter_start + worker_id * per_worker\n iter_end = min(iter_start + per_worker, iter_end)\n\n for index in range(iter_start, iter_end):\n arrays = self.dataset[index]\n\n iters = [iter_patch(a, self.patch_size, self.start_pos, False, self.mode, **self.pad_opts) for a in arrays]\n\n yield from zip(*iters)\n",
"# Copyright 2020 ponai Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA collection of \"vanilla\" transforms for the model output tensors\nhttps://github.com/Project-ponai/ponai/wiki/ponai_Design\n\"\"\"\n\nfrom typing import Optional, Callable\n\nimport torch\nimport torch.nn.functional as F\n\nfrom ponai.transforms.compose import Transform\nfrom ponai.networks import one_hot\nfrom ponai.transforms.utils import get_largest_connected_component_mask\nfrom ponai.utils import ensure_tuple\n\n\nclass SplitChannel(Transform):\n \"\"\"\n Split PyTorch Tensor data according to the channel dim, if only 1 channel, convert to One-Hot\n format first based on the class number. Users can use this transform to compute metrics on every\n single class to get more details of validation/evaluation. Expected input shape:\n ``(batch_size, num_channels, [spatial_dim_1, spatial_dim_2, ...])``\n\n Args:\n to_onehot: whether to convert the data to One-Hot format first.\n Defaults to ``False``.\n num_classes: the class number used to convert to One-Hot format if `to_onehot` is True.\n Defaults to ``None``.\n \"\"\"\n\n def __init__(self, to_onehot: bool = False, num_classes: Optional[int] = None):\n self.to_onehot = to_onehot\n self.num_classes = num_classes\n\n def __call__(self, img, to_onehot: Optional[bool] = None, num_classes: Optional[int] = None):\n \"\"\"\n Args:\n to_onehot: whether to convert the data to One-Hot format first.\n Defaults to ``self.to_onehot``.\n num_classes: the class number used to convert to One-Hot format if `to_onehot` is True.\n Defaults to ``self.num_classes``.\n \"\"\"\n if to_onehot or self.to_onehot:\n if num_classes is None:\n num_classes = self.num_classes\n assert isinstance(num_classes, int), \"must specify class number for One-Hot.\"\n img = one_hot(img, num_classes)\n n_classes = img.shape[1]\n outputs = list()\n for i in range(n_classes):\n outputs.append(img[:, i : i + 1])\n\n return outputs\n\n\nclass Activations(Transform):\n \"\"\"\n Add activation operations to the model output, typically `Sigmoid` or `Softmax`.\n\n Args:\n sigmoid: whether to execute sigmoid function on model output before transform.\n Defaults to ``False``.\n softmax: whether to execute softmax function on model output before transform.\n Defaults to ``False``.\n other: callable function to execute other activation layers, for example:\n `other = lambda x: torch.tanh(x)`. 
Defaults to ``None``.\n\n \"\"\"\n\n def __init__(self, sigmoid: bool = False, softmax: bool = False, other: Optional[Callable] = None):\n self.sigmoid = sigmoid\n self.softmax = softmax\n self.other = other\n\n def __call__(\n self, img, sigmoid: Optional[bool] = None, softmax: Optional[bool] = None, other: Optional[Callable] = None\n ):\n \"\"\"\n Args:\n sigmoid: whether to execute sigmoid function on model output before transform.\n Defaults to ``self.sigmoid``.\n softmax: whether to execute softmax function on model output before transform.\n Defaults to ``self.softmax``.\n other: callable function to execute other activation layers, for example:\n `other = lambda x: torch.tanh(x)`. Defaults to ``self.other``.\n\n Raises:\n ValueError: sigmoid=True and softmax=True are not compatible.\n ValueError: act_func must be a Callable function.\n\n \"\"\"\n if sigmoid is True and softmax is True:\n raise ValueError(\"sigmoid=True and softmax=True are not compatible.\")\n if sigmoid or self.sigmoid:\n img = torch.sigmoid(img)\n if softmax or self.softmax:\n img = torch.softmax(img, dim=1)\n act_func = self.other if other is None else other\n if act_func is not None:\n if not callable(act_func):\n raise ValueError(\"act_func must be a Callable function.\")\n img = act_func(img)\n\n return img\n\n\nclass AsDiscrete(Transform):\n \"\"\"\n Execute after model forward to transform model output to discrete values.\n It can complete below operations:\n\n - execute `argmax` for input logits values.\n - threshold input value to 0.0 or 1.0.\n - convert input value to One-Hot format\n\n Args:\n argmax: whether to execute argmax function on input data before transform.\n Defaults to ``False``.\n to_onehot: whether to convert input data into the one-hot format.\n Defaults to ``False``.\n n_classes: the number of classes to convert to One-Hot format.\n Defaults to ``None``.\n threshold_values: whether threshold the float value to int number 0 or 1.\n Defaults to ``False``.\n logit_thresh: the threshold value for thresholding operation..\n Defaults to ``0.5``.\n\n \"\"\"\n\n def __init__(\n self,\n argmax: bool = False,\n to_onehot: bool = False,\n n_classes: Optional[int] = None,\n threshold_values: bool = False,\n logit_thresh: float = 0.5,\n ):\n self.argmax = argmax\n self.to_onehot = to_onehot\n self.n_classes = n_classes\n self.threshold_values = threshold_values\n self.logit_thresh = logit_thresh\n\n def __call__(\n self,\n img,\n argmax: Optional[bool] = None,\n to_onehot: Optional[bool] = None,\n n_classes: Optional[int] = None,\n threshold_values: Optional[bool] = None,\n logit_thresh: Optional[float] = None,\n ):\n \"\"\"\n Args:\n argmax: whether to execute argmax function on input data before transform.\n Defaults to ``self.argmax``.\n to_onehot: whether to convert input data into the one-hot format.\n Defaults to ``self.to_onehot``.\n n_classes: the number of classes to convert to One-Hot format.\n Defaults to ``self.n_classes``.\n threshold_values: whether threshold the float value to int number 0 or 1.\n Defaults to ``self.threshold_values``.\n logit_thresh: the threshold value for thresholding operation..\n Defaults to ``self.logit_thresh``.\n\n \"\"\"\n if argmax or self.argmax:\n img = torch.argmax(img, dim=1, keepdim=True)\n\n if to_onehot or self.to_onehot:\n _nclasses = self.n_classes if n_classes is None else n_classes\n assert isinstance(_nclasses, int), \"One of self.n_classes or n_classes must be an integer\"\n img = one_hot(img, _nclasses)\n\n if threshold_values or 
self.threshold_values:\n img = img >= (self.logit_thresh if logit_thresh is None else logit_thresh)\n\n return img.float()\n\n\nclass KeepLargestConnectedComponent(Transform):\n \"\"\"\n Keeps only the largest connected component in the image.\n This transform can be used as a post-processing step to clean up over-segmented areas in model output.\n\n The input is assumed to be a PyTorch Tensor:\n 1) With shape (batch_size, 1, spatial_dim1[, spatial_dim2, ...]) and the values correspond to expected labels.\n 2) With shape (batch_size, C, spatial_dim1[, spatial_dim2, ...]) and the values should be 0 or 1 in each label channel.\n\n Note:\n For single channel data, 0 will be treated as background and the over-segmented pixels will be set to 0.\n For one-hot data, the over-segmented pixels will be set to 0 in their channel.\n\n For example:\n Use KeepLargestConnectedComponent with applied_labels=[1], connectivity=1::\n\n [1, 0, 0] [0, 0, 0]\n [0, 1, 1] => [0, 1, 1]\n [0, 1, 1] [0, 1, 1]\n\n Use KeepLargestConnectedComponent with applied_labels=[1, 2], independent=False, connectivity=1::\n\n [0, 0, 1, 0, 0] [0, 0, 1, 0, 0]\n [0, 2, 1, 1, 1] [0, 2, 1, 1, 1]\n [1, 2, 1, 0, 0] => [1, 2, 1, 0, 0]\n [1, 2, 0, 1, 0] [1, 2, 0, 0, 0]\n [2, 2, 0, 0, 2] [2, 2, 0, 0, 0]\n\n Use KeepLargestConnectedComponent with applied_labels=[1, 2], independent=True, connectivity=1::\n\n [0, 0, 1, 0, 0] [0, 0, 1, 0, 0]\n [0, 2, 1, 1, 1] [0, 2, 1, 1, 1]\n [1, 2, 1, 0, 0] => [0, 2, 1, 0, 0]\n [1, 2, 0, 1, 0] [0, 2, 0, 0, 0]\n [2, 2, 0, 0, 2] [2, 2, 0, 0, 0]\n\n Use KeepLargestConnectedComponent with applied_labels=[1, 2], independent=False, connectivity=2::\n\n [0, 0, 1, 0, 0] [0, 0, 1, 0, 0]\n [0, 2, 1, 1, 1] [0, 2, 1, 1, 1]\n [1, 2, 1, 0, 0] => [1, 2, 1, 0, 0]\n [1, 2, 0, 1, 0] [1, 2, 0, 1, 0]\n [2, 2, 0, 0, 2] [2, 2, 0, 0, 2]\n\n \"\"\"\n\n def __init__(self, applied_labels, independent: bool = True, connectivity: Optional[int] = None):\n \"\"\"\n Args:\n applied_labels (int, list or tuple of int): Labels to apply the connected component analysis on.\n If the data has only one channel, pixels whose values are not in this list remain unchanged.\n If the data is in one-hot format, this is used to determine which channels to apply to.\n independent (bool): whether to consider several labels as a whole or independently, default is `True`.\n An example use case: label 1 is liver and label 2 is liver tumor; in that case\n \"independent\" should be specified as False.\n connectivity: Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor.\n Accepted values range from 1 to input.ndim. 
If ``None``, a full\n connectivity of ``input.ndim`` is used.\n \"\"\"\n super().__init__()\n self.applied_labels = ensure_tuple(applied_labels)\n self.independent = independent\n self.connectivity = connectivity\n\n def __call__(self, img):\n \"\"\"\n Args:\n img: shape must be (batch_size, C, spatial_dim1[, spatial_dim2, ...]).\n\n Returns:\n A PyTorch Tensor with shape (batch_size, C, spatial_dim1[, spatial_dim2, ...]).\n \"\"\"\n channel_dim = 1\n if img.shape[channel_dim] == 1:\n img = torch.squeeze(img, dim=channel_dim)\n\n if self.independent:\n for i in self.applied_labels:\n foreground = (img == i).type(torch.uint8)\n mask = get_largest_connected_component_mask(foreground, self.connectivity)\n img[foreground != mask] = 0\n else:\n foreground = torch.zeros_like(img)\n for i in self.applied_labels:\n foreground += (img == i).type(torch.uint8)\n mask = get_largest_connected_component_mask(foreground, self.connectivity)\n img[foreground != mask] = 0\n output = torch.unsqueeze(img, dim=channel_dim)\n else:\n # one-hot data is assumed to have binary values in each channel\n if self.independent:\n for i in self.applied_labels:\n foreground = img[:, i, ...].type(torch.uint8)\n mask = get_largest_connected_component_mask(foreground, self.connectivity)\n img[:, i, ...][foreground != mask] = 0\n else:\n applied_img = img[:, self.applied_labels, ...].type(torch.uint8)\n foreground = torch.any(applied_img, dim=channel_dim)\n mask = get_largest_connected_component_mask(foreground, self.connectivity)\n background_mask = torch.unsqueeze(foreground != mask, dim=channel_dim)\n background_mask = torch.repeat_interleave(background_mask, len(self.applied_labels), dim=channel_dim)\n applied_img[background_mask] = 0\n img[:, self.applied_labels, ...] = applied_img.type(img.type())\n output = img\n\n return output\n\n\nclass LabelToContour(Transform):\n \"\"\"\n Return the contour of binary input images that are composed only of 0 and 1, with the Laplace kernel\n set as default for edge detection. Typical usage is to plot the edge of a label or segmentation output.\n\n Args:\n kernel_type: the method applied to do edge detection, default is \"Laplace\".\n\n \"\"\"\n\n def __init__(self, kernel_type: str = \"Laplace\"):\n if kernel_type != \"Laplace\":\n raise NotImplementedError(\"currently, LabelToContour only supports the Laplace kernel.\")\n self.kernel_type = kernel_type\n\n def __call__(self, img):\n \"\"\"\n Args:\n img: torch tensor data to extract the contour from, with shape: [batch_size, channels, height, width[, depth]]\n\n Returns:\n A torch tensor with the same shape as img, note:\n 1. it is the binary classification result of whether a pixel is an edge or not.\n 2. in order to keep the original shape of the mask image, we use padding as default.\n 3. 
the edge detection is only approximate because of defects inherent to the Laplace kernel;\n ideally the edge should be one pixel thin, but here it has some thickness.\n\n \"\"\"\n channels = img.shape[1]\n if img.ndim == 4:\n kernel = torch.tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=torch.float32, device=img.device)\n kernel = kernel.repeat(channels, 1, 1, 1)\n contour_img = F.conv2d(img, kernel, bias=None, stride=1, padding=1, dilation=1, groups=channels)\n elif img.ndim == 5:\n kernel = -1 * torch.ones(3, 3, 3, dtype=torch.float32, device=img.device)\n kernel[1, 1, 1] = 26\n kernel = kernel.repeat(channels, 1, 1, 1, 1)\n contour_img = F.conv3d(img, kernel, bias=None, stride=1, padding=1, dilation=1, groups=channels)\n else:\n raise RuntimeError(\"the dimensions of img should be 4 or 5.\")\n\n torch.clamp_(contour_img, min=0.0, max=1.0)\n return contour_img\n",
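The post-processing file above is typically used as a small pipeline: activate, discretize, then clean up. Below is a minimal sketch chaining three of the classes on a toy two-class output; it assumes the ponai package is importable and that the classes live in a `ponai.transforms.post` module (the exact module path is not shown in this row, so adjust the import to wherever they actually live).

import torch
# assumed import path for the classes defined above
from ponai.transforms.post import Activations, AsDiscrete, KeepLargestConnectedComponent

logits = torch.randn(1, 2, 4, 4)  # (batch, classes, H, W) toy model output

# 1. turn logits into per-class probabilities along the channel dim
probs = Activations(softmax=True)(logits)
print(probs.sum(dim=1))  # every spatial location sums to 1.0

# 2. discretize: argmax over channels, then one-hot back to 2 channels
discrete = AsDiscrete(argmax=True, to_onehot=True, n_classes=2)(probs)
print(discrete.shape)  # torch.Size([1, 2, 4, 4]), values 0.0/1.0

# 3. keep only the largest connected blob of class 1, dropping stray islands
cleaned = KeepLargestConnectedComponent(applied_labels=[1])(discrete)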
"# Copyright 2020 ponai Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA collection of \"vanilla\" transforms for intensity adjustment\nhttps://github.com/Project-ponai/ponai/wiki/ponai_Design\n\"\"\"\n\nfrom typing import Optional, Tuple\nfrom warnings import warn\n\nimport numpy as np\n\nfrom ponai.transforms.compose import Transform, Randomizable\nfrom ponai.transforms.utils import rescale_array\n\n\nclass RandGaussianNoise(Randomizable, Transform):\n \"\"\"\n Add Gaussian noise to image.\n\n Args:\n prob: Probability to add Gaussian noise.\n mean (float or array of floats): Mean or “centre” of the distribution.\n std: Standard deviation (spread) of distribution.\n \"\"\"\n\n def __init__(self, prob: float = 0.1, mean=0.0, std: float = 0.1):\n self.prob = prob\n self.mean = mean\n self.std = std\n self._do_transform = False\n self._noise = None\n\n def randomize(self, im_shape) -> None: # type: ignore # see issue #495\n self._do_transform = self.R.random() < self.prob\n self._noise = self.R.normal(self.mean, self.R.uniform(0, self.std), size=im_shape)\n\n def __call__(self, img):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n self.randomize(img.shape)\n return img + self._noise.astype(img.dtype) if self._do_transform else img\n\n\nclass ShiftIntensity(Transform):\n \"\"\"\n Shift intensity uniformly for the entire image with specified `offset`.\n\n Args:\n offset: offset value to shift the intensity of image.\n \"\"\"\n\n def __init__(self, offset: float) -> None:\n self.offset = offset\n\n def __call__(self, img):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n return (img + self.offset).astype(img.dtype)\n\n\nclass RandShiftIntensity(Randomizable, Transform):\n \"\"\"\n Randomly shift intensity with randomly picked offset.\n \"\"\"\n\n def __init__(self, offsets, prob: float = 0.1):\n \"\"\"\n Args:\n offsets(int, float, tuple or list): offset range to randomly shift.\n if single number, offset value is picked from (-offsets, offsets).\n prob: probability of shift.\n \"\"\"\n self.offsets = (-offsets, offsets) if not isinstance(offsets, (list, tuple)) else offsets\n assert len(self.offsets) == 2, \"offsets should be a number or pair of numbers.\"\n self.prob = prob\n self._do_transform = False\n\n def randomize(self) -> None: # type: ignore # see issue #495\n self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1])\n self._do_transform = self.R.random() < self.prob\n\n def __call__(self, img):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n self.randomize()\n if not self._do_transform:\n return img\n shifter = ShiftIntensity(self._offset)\n return shifter(img)\n\n\nclass ScaleIntensity(Transform):\n \"\"\"\n Scale the intensity of input image to the given value range (minv, maxv).\n If `minv` and `maxv` not provided, use `factor` to scale image by ``v = v * (1 + factor)``.\n \"\"\"\n\n def __init__(\n self, minv: Optional[float] = 0.0, maxv: Optional[float] = 1.0, factor: Optional[float] = None\n ) -> None:\n \"\"\"\n Args:\n minv: minimum 
value of output data.\n maxv: maximum value of output data.\n factor: factor to scale by, as ``v = v * (1 + factor)``.\n \"\"\"\n self.minv = minv\n self.maxv = maxv\n self.factor = factor\n\n def __call__(self, img):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n if self.minv is not None and self.maxv is not None:\n return rescale_array(img, self.minv, self.maxv, img.dtype)\n else:\n return (img * (1 + self.factor)).astype(img.dtype)\n\n\nclass Whitening(Transform):\n \"\"\"\n Whiten the input image: subtract the mean and divide by the standard deviation\n computed over the whole image, so the output has zero mean and unit variance.\n \"\"\"\n\n def __call__(self, img):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n return (img - np.mean(img)) / np.std(img)\n\n\nclass RandScaleIntensity(Randomizable, Transform):\n \"\"\"\n Randomly scale the intensity of input image by ``v = v * (1 + factor)`` where the `factor`\n is randomly picked from (factors[0], factors[1]).\n \"\"\"\n\n def __init__(self, factors, prob: float = 0.1):\n \"\"\"\n Args:\n factors(float, tuple or list): factor range to randomly scale by ``v = v * (1 + factor)``.\n if single number, factor value is picked from (-factors, factors).\n prob: probability of scale.\n\n \"\"\"\n self.factors = (-factors, factors) if not isinstance(factors, (list, tuple)) else factors\n assert len(self.factors) == 2, \"factors should be a number or pair of numbers.\"\n self.prob = prob\n self._do_transform = False\n\n def randomize(self) -> None: # type: ignore # see issue #495\n self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1])\n self._do_transform = self.R.random() < self.prob\n\n def __call__(self, img):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n self.randomize()\n if not self._do_transform:\n return img\n scaler = ScaleIntensity(minv=None, maxv=None, factor=self.factor)\n return scaler(img)\n\n\nclass NormalizeIntensity(Transform):\n \"\"\"\n Normalize input based on provided args, using calculated mean and std if not provided\n (the shapes of subtrahend and divisor must match; 
if they are 0-dimensional, the entire volume\n uses the same subtrahend and divisor; otherwise the shape can have dimension 1 for channels).\n This transform can normalize only non-zero values or the entire image, and can also calculate\n mean and std on each channel separately.\n\n Args:\n subtrahend (ndarray): the amount to subtract (usually the mean).\n divisor (ndarray): the amount to divide by (usually the standard deviation).\n nonzero: whether to only normalize non-zero values.\n channel_wise: if using calculated mean and std, calculate on each channel separately\n or calculate on the entire image directly.\n \"\"\"\n\n def __init__(\n self,\n subtrahend: Optional[np.ndarray] = None,\n divisor: Optional[np.ndarray] = None,\n nonzero: bool = False,\n channel_wise: bool = False,\n ):\n if subtrahend is not None or divisor is not None:\n assert isinstance(subtrahend, np.ndarray) and isinstance(\n divisor, np.ndarray\n ), \"subtrahend and divisor must be set in pair and in numpy array.\"\n self.subtrahend = subtrahend\n self.divisor = divisor\n self.nonzero = nonzero\n self.channel_wise = channel_wise\n\n def _normalize(self, img):\n slices = (img != 0) if self.nonzero else np.ones(img.shape, dtype=np.bool_)\n if np.any(slices):\n if self.subtrahend is not None and self.divisor is not None:\n img[slices] = (img[slices] - self.subtrahend[slices]) / self.divisor[slices]\n else:\n img[slices] = (img[slices] - np.mean(img[slices])) / np.std(img[slices])\n return img\n\n def __call__(self, img):\n \"\"\"\n Apply the transform to `img`, assuming `img` is a channel-first array if `self.channel_wise` is True.\n \"\"\"\n if self.channel_wise:\n for i, d in enumerate(img):\n img[i] = self._normalize(d)\n else:\n img = self._normalize(img)\n\n return img\n\n\nclass ThresholdIntensity(Transform):\n \"\"\"\n Filter the intensity values of the whole image: keep only values above (or below) the threshold\n and fill the remaining parts of the image with the `cval` value.\n\n Args:\n threshold: the threshold to filter intensity values.\n above: whether to keep values above the threshold or below it, default is True.\n cval: value to fill the remaining parts of the image with, default is 0.\n \"\"\"\n\n def __init__(self, threshold: float, above: bool = True, cval: float = 0.0) -> None:\n self.threshold: float = float(threshold)\n self.above: bool = above\n self.cval: float = cval\n\n def __call__(self, img):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n return np.where(img > self.threshold if self.above else img < self.threshold, img, self.cval).astype(img.dtype)\n\n\nclass ScaleIntensityRange(Transform):\n \"\"\"\n Apply specific intensity scaling to the whole numpy array.\n Scaling from [a_min, a_max] to [b_min, b_max] with clip option.\n\n Args:\n a_min: intensity original range min.\n a_max: intensity original range max.\n b_min: intensity target range min.\n b_max: intensity target range max.\n clip: whether to perform clipping after scaling.\n \"\"\"\n\n def __init__(self, a_min: float, a_max: float, b_min: float, b_max: float, clip: bool = False) -> None:\n self.a_min = a_min\n self.a_max = a_max\n self.b_min = b_min\n self.b_max = b_max\n self.clip = clip\n\n def __call__(self, img):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n if self.a_max - self.a_min == 0.0:\n warn(\"Divide by zero (a_min == a_max)\", Warning)\n return img - self.a_min + self.b_min\n\n img = (img - self.a_min) / (self.a_max - self.a_min)\n img = img * 
(self.b_max - self.b_min) + self.b_min\n if self.clip:\n img = np.clip(img, self.b_min, self.b_max)\n\n return img\n\n\nclass AdjustContrast(Transform):\n \"\"\"\n Changes image intensity by gamma. Each pixel/voxel intensity is updated as::\n\n x = ((x - min) / intensity_range) ^ gamma * intensity_range + min\n\n Args:\n gamma: gamma value to adjust the contrast with.\n \"\"\"\n\n def __init__(self, gamma: float) -> None:\n assert isinstance(gamma, float), \"gamma must be a float number.\"\n self.gamma = gamma\n\n def __call__(self, img):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n epsilon = 1e-7\n img_min = img.min()\n img_range = img.max() - img_min\n return np.power(((img - img_min) / float(img_range + epsilon)), self.gamma) * img_range + img_min\n\n\nclass RandAdjustContrast(Randomizable, Transform):\n \"\"\"\n Randomly changes image intensity by gamma. Each pixel/voxel intensity is updated as::\n\n x = ((x - min) / intensity_range) ^ gamma * intensity_range + min\n\n Args:\n prob: Probability of adjustment.\n gamma (tuple of float or float): Range of gamma values.\n If a single number, the value is picked from (0.5, gamma); default is (0.5, 4.5).\n \"\"\"\n\n def __init__(self, prob: float = 0.1, gamma=(0.5, 4.5)):\n self.prob = prob\n self.gamma: Tuple[float, float]\n\n if not isinstance(gamma, (tuple, list)):\n assert gamma > 0.5, \"if gamma is a single number, it must be greater than 0.5 and the value is picked from (0.5, gamma)\"\n self.gamma = (0.5, gamma)\n else:\n assert len(gamma) == 2, \"gamma should be a number or pair of numbers.\"\n self.gamma = (gamma[0], gamma[1])\n\n self._do_transform = False\n self.gamma_value = None\n\n def randomize(self) -> None: # type: ignore # see issue #495\n self._do_transform = self.R.random_sample() < self.prob\n self.gamma_value = self.R.uniform(low=self.gamma[0], high=self.gamma[1])\n\n def __call__(self, img):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n self.randomize()\n if not self._do_transform:\n return img\n adjuster = AdjustContrast(self.gamma_value)\n return adjuster(img)\n\n\nclass ScaleIntensityRangePercentiles(Transform):\n \"\"\"\n Apply range scaling to a numpy array based on the intensity distribution of the input.\n\n By default this transform will scale from [lower_intensity_percentile, upper_intensity_percentile] to [b_min, b_max], where\n {lower,upper}_intensity_percentile are the intensity values at the corresponding percentiles of ``img``.\n\n The ``relative`` parameter can also be set to scale from [lower_intensity_percentile, upper_intensity_percentile] to the\n lower and upper percentiles of the output range [b_min, b_max].\n\n For example:\n\n .. 
code-block:: python\n :emphasize-lines: 11, 22\n\n image = np.array(\n [[[1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5]]])\n\n # Scale from lower and upper image intensity percentiles\n # to output range [b_min, b_max]\n scaler = ScaleIntensityRangePercentiles(10, 90, 0, 200, False, False)\n print(scaler(image))\n [[[0., 50., 100., 150., 200.],\n [0., 50., 100., 150., 200.],\n [0., 50., 100., 150., 200.],\n [0., 50., 100., 150., 200.],\n [0., 50., 100., 150., 200.],\n [0., 50., 100., 150., 200.]]]\n\n # Scale from lower and upper image intensity percentiles\n # to lower and upper percentiles of the output range [b_min, b_max]\n rel_scaler = ScaleIntensityRangePercentiles(10, 90, 0, 200, False, True)\n print(rel_scaler(image))\n [[[20., 60., 100., 140., 180.],\n [20., 60., 100., 140., 180.],\n [20., 60., 100., 140., 180.],\n [20., 60., 100., 140., 180.],\n [20., 60., 100., 140., 180.],\n [20., 60., 100., 140., 180.]]]\n\n\n Args:\n lower: lower intensity percentile.\n upper: upper intensity percentile.\n b_min: intensity target range min.\n b_max: intensity target range max.\n clip: whether to perform clip after scaling.\n relative: whether to scale to the corresponding percentiles of [b_min, b_max].\n \"\"\"\n\n def __init__(\n self, lower: float, upper: float, b_min: float, b_max: float, clip: bool = False, relative: bool = False\n ) -> None:\n assert 0.0 <= lower <= 100.0, \"Percentiles must be in the range [0, 100]\"\n assert 0.0 <= upper <= 100.0, \"Percentiles must be in the range [0, 100]\"\n self.lower = lower\n self.upper = upper\n self.b_min = b_min\n self.b_max = b_max\n self.clip = clip\n self.relative = relative\n\n def __call__(self, img):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n a_min = np.percentile(img, self.lower)\n a_max = np.percentile(img, self.upper)\n b_min = self.b_min\n b_max = self.b_max\n\n if self.relative:\n b_min = ((self.b_max - self.b_min) * (self.lower / 100.0)) + self.b_min\n b_max = ((self.b_max - self.b_min) * (self.upper / 100.0)) + self.b_min\n\n scalar = ScaleIntensityRange(a_min=a_min, a_max=a_max, b_min=b_min, b_max=b_max, clip=False)\n img = scalar(img)\n\n if self.clip:\n img = np.clip(img, self.b_min, self.b_max)\n\n return img\n"
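The deterministic intensity transforms above are plain numpy arithmetic, which makes them easy to sanity-check by hand. A minimal sketch follows, assuming the classes are importable from a `ponai.transforms.intensity` module (the module path is an assumption; this row does not show it).

import numpy as np
# assumed import path; the classes are defined in the file above
from ponai.transforms.intensity import ScaleIntensityRange, NormalizeIntensity

img = np.array([100.0, 150.0, 200.0])

# linear map: v' = (v - a_min) / (a_max - a_min) * (b_max - b_min) + b_min
scaler = ScaleIntensityRange(a_min=100, a_max=200, b_min=0.0, b_max=1.0)
print(scaler(img))  # [0.  0.5 1. ]

# z-score normalization; _normalize writes in place, hence the copy
normalizer = NormalizeIntensity()
out = normalizer(img.copy())
print(out.mean(), out.std())  # approximately 0.0 and 1.0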
] | [
[
"torch.utils.data.get_worker_info"
],
[
"torch.sigmoid",
"torch.clamp_",
"torch.softmax",
"torch.ones",
"torch.nn.functional.conv3d",
"torch.nn.functional.conv2d",
"torch.zeros_like",
"torch.unsqueeze",
"torch.tensor",
"torch.any",
"torch.squeeze",
"torch.argmax"
],
[
"numpy.clip",
"numpy.percentile",
"numpy.ones",
"numpy.std",
"numpy.mean",
"numpy.any",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kylemath/mne-python | [
"586c5d918a673ab5d5c92ffb4479fe57fee5559d"
] | [
"mne/channels/tests/test_montage.py"
] | [
"# Author: Teon Brooks <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom itertools import chain\nimport os\nimport os.path as op\n\nimport pytest\n\nimport numpy as np\nfrom functools import partial\nfrom string import ascii_lowercase\n\nfrom numpy.testing import (assert_array_equal,\n assert_allclose, assert_equal)\nimport matplotlib.pyplot as plt\n\nfrom mne import __file__ as _mne_file, create_info, read_evokeds, pick_types\nfrom mne.fixes import nullcontext\nfrom mne.utils._testing import assert_object_equal\nfrom mne.channels import (get_builtin_montages, DigMontage, read_dig_dat,\n read_dig_egi, read_dig_captrak, read_dig_fif,\n make_standard_montage, read_custom_montage,\n compute_dev_head_t, make_dig_montage,\n read_dig_polhemus_isotrak, compute_native_head_t,\n read_polhemus_fastscan,\n read_dig_hpts)\nfrom mne.channels.montage import transform_to_head, _check_get_coord_frame\nfrom mne.utils import run_tests_if_main, assert_dig_allclose\nfrom mne.bem import _fit_sphere\nfrom mne.io.constants import FIFF\nfrom mne.io._digitization import (_format_dig_points,\n _get_fid_coords, _get_dig_eeg,\n _count_points_by_type)\nfrom mne.transforms import _ensure_trans\nfrom mne.viz._3d import _fiducial_coords\n\nfrom mne.io.kit import read_mrk\nfrom mne.io import (read_raw_brainvision, read_raw_egi, read_raw_fif,\n read_fiducials, __file__ as _MNE_IO_FILE)\n\nfrom mne.io import RawArray\nfrom mne.datasets import testing\nfrom mne.io.brainvision import __file__ as _BRAINVISON_FILE\n\n\ndata_path = testing.data_path(download=False)\nfif_dig_montage_fname = op.join(data_path, 'montage', 'eeganes07.fif')\negi_dig_montage_fname = op.join(data_path, 'montage', 'coordinates.xml')\negi_raw_fname = op.join(data_path, 'montage', 'egi_dig_test.raw')\negi_fif_fname = op.join(data_path, 'montage', 'egi_dig_raw.fif')\nbvct_dig_montage_fname = op.join(data_path, 'montage', 'captrak_coords.bvct')\nbv_raw_fname = op.join(data_path, 'montage', 'bv_dig_test.vhdr')\nbv_fif_fname = op.join(data_path, 'montage', 'bv_dig_raw.fif')\nlocs_montage_fname = op.join(data_path, 'EEGLAB', 'test_chans.locs')\nevoked_fname = op.join(data_path, 'montage', 'level2_raw-ave.fif')\neeglab_fname = op.join(data_path, 'EEGLAB', 'test_raw.set')\nbdf_fname1 = op.join(data_path, 'BDF', 'test_generator_2.bdf')\nbdf_fname2 = op.join(data_path, 'BDF', 'test_bdf_stim_channel.bdf')\negi_fname1 = op.join(data_path, 'EGI', 'test_egi.mff')\ncnt_fname = op.join(data_path, 'CNT', 'scan41_short.cnt')\n\nio_dir = op.dirname(_MNE_IO_FILE)\nkit_dir = op.join(io_dir, 'kit', 'tests', 'data')\nelp = op.join(kit_dir, 'test_elp.txt')\nhsp = op.join(kit_dir, 'test_hsp.txt')\nhpi = op.join(kit_dir, 'test_mrk.sqd')\nbv_fname = op.join(io_dir, 'brainvision', 'tests', 'data', 'test.vhdr')\nfif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif')\nedf_path = op.join(io_dir, 'edf', 'tests', 'data', 'test.edf')\nbdf_path = op.join(io_dir, 'edf', 'tests', 'data', 'test_bdf_eeglab.mat')\negi_fname2 = op.join(io_dir, 'egi', 'tests', 'data', 'test_egi.raw')\nvhdr_path = op.join(io_dir, 'brainvision', 'tests', 'data', 'test.vhdr')\nctf_fif_fname = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')\nnicolet_fname = op.join(io_dir, 'nicolet', 'tests', 'data',\n 'test_nicolet_raw.data')\n\n\ndef _make_toy_raw(n_channels):\n return RawArray(\n data=np.empty([n_channels, 1]),\n info=create_info(\n ch_names=list(ascii_lowercase[:n_channels]),\n sfreq=1, ch_types='eeg'\n )\n )\n\n\ndef 
_make_toy_dig_montage(n_channels, **kwargs):\n return make_dig_montage(\n ch_pos=dict(zip(\n list(ascii_lowercase[:n_channels]),\n np.arange(n_channels * 3).reshape(n_channels, 3),\n )),\n **kwargs\n )\n\n\ndef _get_dig_montage_pos(montage):\n return np.array([d['r'] for d in _get_dig_eeg(montage.dig)])\n\n\ndef test_dig_montage_trans(tmpdir):\n \"\"\"Test getting a trans from montage.\"\"\"\n nasion, lpa, rpa, *ch_pos = np.random.RandomState(0).randn(10, 3)\n ch_pos = {f'EEG{ii:3d}': pos for ii, pos in enumerate(ch_pos, 1)}\n montage = make_dig_montage(ch_pos, nasion=nasion, lpa=lpa, rpa=rpa,\n coord_frame='mri')\n trans = compute_native_head_t(montage)\n _ensure_trans(trans)\n # ensure that we can save and load it, too\n fname = tmpdir.join('temp-mon.fif')\n _check_roundtrip(montage, fname, 'mri')\n\n\ndef test_fiducials():\n \"\"\"Test handling of fiducials.\"\"\"\n # Eventually the code used here should be unified with montage.py, but for\n # now it uses code in odd places\n for fname in (fif_fname, ctf_fif_fname):\n fids, coord_frame = read_fiducials(fname)\n points = _fiducial_coords(fids, coord_frame)\n assert points.shape == (3, 3)\n # Fids\n assert_allclose(points[:, 2], 0., atol=1e-6)\n assert_allclose(points[::2, 1], 0., atol=1e-6)\n assert points[2, 0] > 0 # RPA\n assert points[0, 0] < 0 # LPA\n # Nasion\n assert_allclose(points[1, 0], 0., atol=1e-6)\n assert points[1, 1] > 0\n\n\ndef test_documented():\n \"\"\"Test that standard montages are documented.\"\"\"\n docs = make_standard_montage.__doc__\n lines = [line[4:] for line in docs.splitlines()]\n start = stop = None\n for li, line in enumerate(lines):\n if line.startswith('====') and li < len(lines) - 2 and \\\n lines[li + 1].startswith('Kind') and\\\n lines[li + 2].startswith('===='):\n start = li + 3\n elif start is not None and li > start and line.startswith('===='):\n stop = li\n break\n assert (start is not None)\n assert (stop is not None)\n kinds = [line.split(' ')[0] for line in lines[start:stop]]\n kinds = [kind for kind in kinds if kind != '']\n montages = os.listdir(op.join(op.dirname(_mne_file), 'channels', 'data',\n 'montages'))\n montages = sorted(op.splitext(m)[0] for m in montages)\n assert_equal(len(set(montages)), len(montages))\n assert_equal(len(set(kinds)), len(kinds), err_msg=str(sorted(kinds)))\n assert_equal(set(montages), set(kinds))\n\n\[email protected]('reader, file_content, expected_dig, ext, warning', [\n pytest.param(\n partial(read_custom_montage, head_size=None),\n ('FidNz 0 9.071585155 -2.359754454\\n'\n 'FidT9 -6.711765 0.040402876 -3.251600355\\n'\n 'very_very_very_long_name -5.831241498 -4.494821698 4.955347697\\n'\n 'Cz 0 0 1\\n'\n 'Cz 0 0 8.899186843'),\n make_dig_montage(\n ch_pos={\n 'very_very_very_long_name': [-5.8312416, -4.4948215, 4.9553475], # noqa\n 'Cz': [0., 0., 8.899187],\n },\n nasion=[0., 9.071585, -2.3597546],\n lpa=[-6.711765, 0.04040287, -3.2516003],\n rpa=None,\n ),\n 'sfp',\n (RuntimeWarning, r'Duplicate.*last will be used for Cz \\(2\\)'),\n id='sfp_duplicate'),\n\n pytest.param(\n partial(read_custom_montage, head_size=None),\n ('FidNz 0 9.071585155 -2.359754454\\n'\n 'FidT9 -6.711765 0.040402876 -3.251600355\\n'\n 'headshape 1 2 3\\n'\n 'headshape 4 5 6\\n'\n 'Cz 0 0 8.899186843'),\n make_dig_montage(\n hsp=[\n [1, 2, 3],\n [4, 5, 6],\n ],\n ch_pos={\n 'Cz': [0., 0., 8.899187],\n },\n nasion=[0., 9.071585, -2.3597546],\n lpa=[-6.711765, 0.04040287, -3.2516003],\n rpa=None,\n ),\n 'sfp',\n None,\n id='sfp_headshape'),\n\n pytest.param(\n 
partial(read_custom_montage, head_size=1),\n ('1\t 0\t 0.50669\t FPz\\n'\n '2\t 23\t 0.71\t \tEOG1\\n'\n '3\t -39.947\t 0.34459\t F3\\n'\n '4\t 0\t 0.25338\t Fz\\n'),\n make_dig_montage(\n ch_pos={\n 'EOG1': [0.30873816, 0.72734152, -0.61290705],\n 'F3': [-0.56705965, 0.67706631, 0.46906776],\n 'FPz': [0., 0.99977915, -0.02101571],\n 'Fz': [0., 0.71457525, 0.69955859],\n },\n nasion=None, lpa=None, rpa=None, coord_frame='head',\n ),\n 'loc',\n None,\n id='EEGLAB'),\n\n pytest.param(\n partial(read_custom_montage, head_size=None, coord_frame='mri'),\n ('// MatLab Sphere coordinates [degrees] Cartesian coordinates\\n' # noqa: E501\n '// Label Theta Phi Radius X Y Z off sphere surface\\n' # noqa: E501\n 'E1 37.700 -14.000 1.000 0.7677 0.5934 -0.2419 -0.00000000000000011\\n' # noqa: E501\n 'E3 51.700 11.000 1.000 0.6084 0.7704 0.1908 0.00000000000000000\\n' # noqa: E501\n 'E31 90.000 -11.000 1.000 0.0000 0.9816 -0.1908 0.00000000000000000\\n' # noqa: E501\n 'E61 158.000 -17.200 1.000 -0.8857 0.3579 -0.2957 -0.00000000000000022'), # noqa: E501\n make_dig_montage(\n ch_pos={\n 'E1': [0.7677, 0.5934, -0.2419],\n 'E3': [0.6084, 0.7704, 0.1908],\n 'E31': [0., 0.9816, -0.1908],\n 'E61': [-0.8857, 0.3579, -0.2957],\n },\n nasion=None, lpa=None, rpa=None, coord_frame='mri',\n ),\n 'csd',\n None,\n id='matlab'),\n\n pytest.param(\n partial(read_custom_montage, head_size=None),\n ('# ASA electrode file\\nReferenceLabel avg\\nUnitPosition mm\\n'\n 'NumberPositions= 68\\n'\n 'Positions\\n'\n '-86.0761 -19.9897 -47.9860\\n'\n '85.7939 -20.0093 -48.0310\\n'\n '0.0083 86.8110 -39.9830\\n'\n '-86.0761 -24.9897 -67.9860\\n'\n 'Labels\\nLPA\\nRPA\\nNz\\nDummy\\n'),\n make_dig_montage(\n ch_pos={\n 'Dummy': [-0.0860761, -0.0249897, -0.067986],\n },\n nasion=[8.3000e-06, 8.6811e-02, -3.9983e-02],\n lpa=[-0.0860761, -0.0199897, -0.047986],\n rpa=[0.0857939, -0.0200093, -0.048031],\n ),\n 'elc',\n None,\n id='ASA electrode'),\n\n pytest.param(\n partial(read_custom_montage, head_size=1),\n ('Site Theta Phi\\n'\n 'Fp1 -92 -72\\n'\n 'Fp2 92 72\\n'\n 'very_very_very_long_name -92 72\\n'\n 'O2 92 -90\\n'),\n make_dig_montage(\n ch_pos={\n 'Fp1': [-0.30882875, 0.95047716, -0.0348995],\n 'Fp2': [0.30882875, 0.95047716, -0.0348995],\n 'very_very_very_long_name': [-0.30882875, -0.95047716, -0.0348995], # noqa\n 'O2': [6.11950389e-17, -9.99390827e-01, -3.48994967e-02]\n },\n nasion=None, lpa=None, rpa=None,\n ),\n 'txt',\n None,\n id='generic theta-phi (txt)'),\n\n pytest.param(\n partial(read_custom_montage, head_size=None),\n ('346\\n' # XXX: this should actually race an error 346 != 4\n 'FID\\t LPA\\t -120.03\\t 0\\t 85\\n'\n 'FID\\t RPA\\t 120.03\\t 0\\t 85\\n'\n 'FID\\t Nz\\t 114.03\\t 90\\t 85\\n'\n 'EEG\\t F3\\t -62.027\\t -50.053\\t 85\\n'\n 'EEG\\t Fz\\t 45.608\\t 90\\t 85\\n'\n 'EEG\\t F4\\t 62.01\\t 50.103\\t 85\\n'\n 'EEG\\t FCz\\t 68.01\\t 58.103\\t 85\\n'),\n make_dig_montage(\n ch_pos={\n 'F3': [-0.48200427, 0.57551063, 0.39869712],\n 'Fz': [3.71915931e-17, 6.07384809e-01, 5.94629038e-01],\n 'F4': [0.48142596, 0.57584026, 0.39891983],\n 'FCz': [0.41645989, 0.66914889, 0.31827805],\n },\n nasion=[4.75366562e-17, 7.76332511e-01, -3.46132681e-01],\n lpa=[-7.35898963e-01, 9.01216309e-17, -4.25385374e-01],\n rpa=[0.73589896, 0., -0.42538537],\n ),\n 'elp',\n None,\n id='BESA spherical model'),\n\n pytest.param(\n partial(read_dig_hpts, unit='m'),\n ('eeg Fp1 -95.0 -3. 
-3.\\n'\n 'eeg AF7 -1 -1 -3\\n'\n 'eeg A3 -2 -2 2\\n'\n 'eeg A 0 0 0'),\n make_dig_montage(\n ch_pos={\n 'A': [0., 0., 0.], 'A3': [-2., -2., 2.],\n 'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.],\n },\n nasion=None, lpa=None, rpa=None,\n ),\n 'hpts',\n None,\n id='legacy mne-c'),\n\n pytest.param(\n partial(read_custom_montage, head_size=None),\n ('<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\\n'\n '<!-- Generated by EasyCap Configurator 19.05.2014 -->\\n'\n '<Electrodes defaults=\"false\">\\n'\n ' <Electrode>\\n'\n ' <Name>Fp1</Name>\\n'\n ' <Theta>-90</Theta>\\n'\n ' <Phi>-72</Phi>\\n'\n ' <Radius>1</Radius>\\n'\n ' <Number>1</Number>\\n'\n ' </Electrode>\\n'\n ' <Electrode>\\n'\n ' <Name>Fz</Name>\\n'\n ' <Theta>45</Theta>\\n'\n ' <Phi>90</Phi>\\n'\n ' <Radius>1</Radius>\\n'\n ' <Number>2</Number>\\n'\n ' </Electrode>\\n'\n ' <Electrode>\\n'\n ' <Name>F3</Name>\\n'\n ' <Theta>-60</Theta>\\n'\n ' <Phi>-51</Phi>\\n'\n ' <Radius>1</Radius>\\n'\n ' <Number>3</Number>\\n'\n ' </Electrode>\\n'\n ' <Electrode>\\n'\n ' <Name>F7</Name>\\n'\n ' <Theta>-90</Theta>\\n'\n ' <Phi>-36</Phi>\\n'\n ' <Radius>1</Radius>\\n'\n ' <Number>4</Number>\\n'\n ' </Electrode>\\n'\n '</Electrodes>'),\n make_dig_montage(\n ch_pos={\n 'Fp1': [-3.09016994e-01, 9.51056516e-01, 6.12323400e-17],\n 'Fz': [4.32978028e-17, 7.07106781e-01, 7.07106781e-01],\n 'F3': [-0.54500745, 0.67302815, 0.5],\n 'F7': [-8.09016994e-01, 5.87785252e-01, 6.12323400e-17],\n },\n nasion=None, lpa=None, rpa=None,\n ),\n 'bvef',\n None,\n id='brainvision'),\n])\ndef test_montage_readers(\n reader, file_content, expected_dig, ext, warning, tmpdir\n):\n \"\"\"Test that we have an equivalent of read_montage for all file formats.\"\"\"\n fname = op.join(str(tmpdir), 'test.{ext}'.format(ext=ext))\n with open(fname, 'w') as fid:\n fid.write(file_content)\n\n if warning is None:\n ctx = nullcontext()\n else:\n ctx = pytest.warns(warning[0], match=warning[1])\n with ctx:\n dig_montage = reader(fname)\n assert isinstance(dig_montage, DigMontage)\n\n actual_ch_pos = dig_montage._get_ch_pos()\n expected_ch_pos = expected_dig._get_ch_pos()\n for kk in actual_ch_pos:\n assert_allclose(actual_ch_pos[kk], expected_ch_pos[kk], atol=1e-5)\n assert len(dig_montage.dig) == len(expected_dig.dig)\n for d1, d2 in zip(dig_montage.dig, expected_dig.dig):\n assert d1['coord_frame'] == d2['coord_frame']\n for key in ('coord_frame', 'ident', 'kind'):\n assert isinstance(d1[key], int)\n assert isinstance(d2[key], int)\n with pytest.warns(None) as w:\n xform = compute_native_head_t(dig_montage)\n assert xform['to'] == FIFF.FIFFV_COORD_HEAD\n assert xform['from'] == FIFF.FIFFV_COORD_UNKNOWN\n n = int(np.allclose(xform['trans'], np.eye(4)))\n assert len(w) == n\n\n\[email protected]_testing_data\ndef test_read_locs():\n \"\"\"Test reading EEGLAB locs.\"\"\"\n data = read_custom_montage(locs_montage_fname)._get_ch_pos()\n assert_allclose(\n actual=np.stack(\n [data[kk] for kk in ('FPz', 'EOG1', 'F3', 'Fz')] # 4 random chs\n ),\n desired=[[0., 0.094979, -0.001996],\n [0.02933, 0.069097, -0.058226],\n [-0.053871, 0.064321, 0.044561],\n [0., 0.067885, 0.066458]],\n atol=1e-6\n )\n\n\ndef test_read_dig_dat(tmpdir):\n \"\"\"Test reading *.dat electrode locations.\"\"\"\n rows = [\n ['Nasion', 78, 0.00, 1.00, 0.00],\n ['Left', 76, -1.00, 0.00, 0.00],\n ['Right', 82, 1.00, -0.00, 0.00],\n ['O2', 69, -0.50, -0.90, 0.05],\n ['O2', 68, 0.00, 0.01, 0.02],\n ['Centroid', 67, 0.00, 0.00, 0.00],\n ]\n # write mock test.dat file\n temp_dir = str(tmpdir)\n fname_temp = 
op.join(temp_dir, 'test.dat')\n with open(fname_temp, 'w') as fid:\n for row in rows:\n name = row[0].rjust(10)\n data = '\\t'.join(map(str, row[1:]))\n fid.write(\"%s\\t%s\\n\" % (name, data))\n # construct expected value\n idents = {\n 78: FIFF.FIFFV_POINT_NASION,\n 76: FIFF.FIFFV_POINT_LPA,\n 82: FIFF.FIFFV_POINT_RPA,\n 68: 1,\n 69: 1,\n }\n kinds = {\n 78: FIFF.FIFFV_POINT_CARDINAL,\n 76: FIFF.FIFFV_POINT_CARDINAL,\n 82: FIFF.FIFFV_POINT_CARDINAL,\n 69: FIFF.FIFFV_POINT_EEG,\n 68: FIFF.FIFFV_POINT_EEG,\n }\n target = {row[0]: {'r': row[2:], 'ident': idents[row[1]],\n 'kind': kinds[row[1]], 'coord_frame': 0}\n for row in rows[:-1]}\n assert_allclose(target['O2']['r'], [0, 0.01, 0.02])\n # read it\n with pytest.warns(RuntimeWarning, match=r'Duplic.*for O2 \\(2\\)'):\n dig = read_dig_dat(fname_temp)\n assert set(dig.ch_names) == {'O2'}\n keys = chain(['Left', 'Nasion', 'Right'], dig.ch_names)\n target = [target[k] for k in keys]\n assert dig.dig == target\n\n\ndef test_read_dig_montage_using_polhemus_fastscan():\n \"\"\"Test FastScan.\"\"\"\n N_EEG_CH = 10\n\n my_electrode_positions = read_polhemus_fastscan(\n op.join(kit_dir, 'test_elp.txt')\n )\n\n montage = make_dig_montage(\n # EEG_CH\n ch_pos=dict(zip(ascii_lowercase[:N_EEG_CH],\n np.random.RandomState(0).rand(N_EEG_CH, 3))),\n # NO NAMED points\n nasion=my_electrode_positions[0],\n lpa=my_electrode_positions[1],\n rpa=my_electrode_positions[2],\n hpi=my_electrode_positions[3:],\n hsp=read_polhemus_fastscan(op.join(kit_dir, 'test_hsp.txt')),\n\n # Other defaults\n coord_frame='unknown'\n )\n\n assert repr(montage) == (\n '<DigMontage | '\n '500 extras (headshape), 5 HPIs, 3 fiducials, 10 channels>'\n ) # XXX: is this wrong? extra is not in headspace, is it?\n\n assert set([d['coord_frame'] for d in montage.dig]) == {\n FIFF.FIFFV_COORD_UNKNOWN\n } # XXX: so far we build everything in 'unknown'\n\n EXPECTED_FID_IN_POLHEMUS = {\n 'nasion': [0.001393, 0.0131613, -0.0046967],\n 'lpa': [-0.0624997, -0.0737271, 0.07996],\n 'rpa': [-0.0748957, 0.0873785, 0.0811943],\n }\n fiducials, fid_coordframe = _get_fid_coords(montage.dig)\n assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN\n for kk, val in fiducials.items():\n assert_allclose(val, EXPECTED_FID_IN_POLHEMUS[kk])\n\n\ndef test_read_dig_montage_using_polhemus_fastscan_error_handling(tmpdir):\n \"\"\"Test reading Polhemus FastSCAN errors.\"\"\"\n with open(op.join(kit_dir, 'test_elp.txt')) as fid:\n content = fid.read().replace('FastSCAN', 'XxxxXXXX')\n\n fname = str(tmpdir.join('faulty_FastSCAN.txt'))\n with open(fname, 'w') as fid:\n fid.write(content)\n\n with pytest.raises(ValueError, match='not contain Polhemus FastSCAN'):\n _ = read_polhemus_fastscan(fname)\n\n EXPECTED_ERR_MSG = \"allowed value is '.txt', but got '.bar' instead\"\n with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):\n _ = read_polhemus_fastscan(fname=tmpdir.join('foo.bar'))\n\n\ndef test_read_dig_polhemus_isotrak_hsp():\n \"\"\"Test reading Polhemus IsoTrak HSP file.\"\"\"\n EXPECTED_FID_IN_POLHEMUS = {\n 'nasion': np.array([1.1056e-01, -5.4210e-19, 0]),\n 'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]),\n 'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]),\n }\n montage = read_dig_polhemus_isotrak(fname=op.join(kit_dir, 'test.hsp'),\n ch_names=None)\n assert repr(montage) == (\n '<DigMontage | '\n '500 extras (headshape), 0 HPIs, 3 fiducials, 0 channels>'\n )\n\n fiducials, fid_coordframe = _get_fid_coords(montage.dig)\n\n assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN\n for kk, val in 
fiducials.items():\n assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk])\n\n\ndef test_read_dig_polhemus_isotrak_elp():\n \"\"\"Test reading Polhemus IsoTrak ELP file.\"\"\"\n EXPECTED_FID_IN_POLHEMUS = {\n 'nasion': np.array([1.1056e-01, -5.4210e-19, 0]),\n 'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]),\n 'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]),\n }\n montage = read_dig_polhemus_isotrak(fname=op.join(kit_dir, 'test.elp'),\n ch_names=None)\n assert repr(montage) == (\n '<DigMontage | '\n '0 extras (headshape), 5 HPIs, 3 fiducials, 0 channels>'\n )\n fiducials, fid_coordframe = _get_fid_coords(montage.dig)\n\n assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN\n for kk, val in fiducials.items():\n assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk])\n\n\[email protected](scope='module')\ndef isotrak_eeg(tmpdir_factory):\n \"\"\"Mock isotrak file with EEG positions.\"\"\"\n _SEED = 42\n N_ROWS, N_COLS = 5, 3\n content = np.random.RandomState(_SEED).randn(N_ROWS, N_COLS)\n\n fname = tmpdir_factory.mktemp('data').join('test.eeg')\n with open(str(fname), 'w') as fid:\n fid.write((\n '3\t200\\n'\n '//Shape file\\n'\n '//Minor revision number\\n'\n '2\\n'\n '//Subject Name\\n'\n '%N\tName \\n'\n '////Shape code, number of digitized points\\n'\n ))\n fid.write('0 {rows:d}\\n'.format(rows=N_ROWS))\n fid.write((\n '//Position of fiducials X+, Y+, Y- on the subject\\n'\n '%F\t0.11056\t-5.421e-19\t0\t\\n'\n '%F\t-0.00021075\t0.080793\t-7.5894e-19\t\\n'\n '%F\t0.00021075\t-0.080793\t-2.8731e-18\t\\n'\n '//No of rows, no of columns; position of digitized points\\n'\n ))\n fid.write('{rows:d} {cols:d}\\n'.format(rows=N_ROWS, cols=N_COLS))\n for row in content:\n fid.write('\\t'.join('%0.18e' % cell for cell in row) + '\\n')\n\n return str(fname)\n\n\ndef test_read_dig_polhemus_isotrak_eeg(isotrak_eeg):\n \"\"\"Test reading Polhemus IsoTrak EEG positions.\"\"\"\n N_CHANNELS = 5\n _SEED = 42\n EXPECTED_FID_IN_POLHEMUS = {\n 'nasion': np.array([1.1056e-01, -5.4210e-19, 0]),\n 'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]),\n 'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]),\n }\n ch_names = ['eeg {:01d}'.format(ii) for ii in range(N_CHANNELS)]\n EXPECTED_CH_POS = dict(zip(\n ch_names, np.random.RandomState(_SEED).randn(N_CHANNELS, 3)))\n\n montage = read_dig_polhemus_isotrak(fname=isotrak_eeg, ch_names=ch_names)\n assert repr(montage) == (\n '<DigMontage | '\n '0 extras (headshape), 0 HPIs, 3 fiducials, 5 channels>'\n )\n\n fiducials, fid_coordframe = _get_fid_coords(montage.dig)\n\n assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN\n for kk, val in fiducials.items():\n assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk])\n\n for kk, dig_point in zip(montage.ch_names, _get_dig_eeg(montage.dig)):\n assert_array_equal(dig_point['r'], EXPECTED_CH_POS[kk])\n assert dig_point['coord_frame'] == FIFF.FIFFV_COORD_UNKNOWN\n\n\ndef test_read_dig_polhemus_isotrak_error_handling(isotrak_eeg, tmpdir):\n \"\"\"Test errors in reading Polhemus IsoTrak files.\n\n 1 - matching ch_names and number of points in isotrak file.\n 2 - error for unsupported file extensions.\n \"\"\"\n # Check ch_names\n N_CHANNELS = 5\n EXPECTED_ERR_MSG = \"not match the number of points.*Expected.*5, given 47\"\n with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):\n _ = read_dig_polhemus_isotrak(\n fname=isotrak_eeg,\n ch_names=['eeg {:01d}'.format(ii) for ii in range(N_CHANNELS + 42)]\n )\n\n # Check fname extensions\n fname = op.join(tmpdir, 'foo.bar')\n with pytest.raises(\n ValueError,\n 
match=\"Allowed val.*'.hsp', '.elp', and '.eeg', but got '.bar' instead\"\n ):\n _ = read_dig_polhemus_isotrak(fname=fname, ch_names=None)\n\n\ndef test_combining_digmontage_objects():\n \"\"\"Test combining different DigMontage objects.\"\"\"\n rng = np.random.RandomState(0)\n fiducials = dict(zip(('nasion', 'lpa', 'rpa'), rng.rand(3, 3)))\n\n # hsp positions are [1X, 1X, 1X]\n hsp1 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 11.))\n hsp2 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 12.))\n hsp3 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 13.))\n\n # hpi positions are [2X, 2X, 2X]\n hpi1 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 21.))\n hpi2 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 22.))\n hpi3 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 23.))\n\n # channels have positions at 40s, 50s, and 60s.\n ch_pos1 = make_dig_montage(\n **fiducials,\n ch_pos={'h': [41, 41, 41], 'b': [42, 42, 42], 'g': [43, 43, 43]}\n )\n ch_pos2 = make_dig_montage(\n **fiducials,\n ch_pos={'n': [51, 51, 51], 'y': [52, 52, 52], 'p': [53, 53, 53]}\n )\n ch_pos3 = make_dig_montage(\n **fiducials,\n ch_pos={'v': [61, 61, 61], 'a': [62, 62, 62], 'l': [63, 63, 63]}\n )\n\n montage = (\n DigMontage() + hsp1 + hsp2 + hsp3 + hpi1 + hpi2 + hpi3 + ch_pos1 +\n ch_pos2 + ch_pos3\n )\n assert repr(montage) == (\n '<DigMontage | '\n '6 extras (headshape), 6 HPIs, 3 fiducials, 9 channels>'\n )\n\n EXPECTED_MONTAGE = make_dig_montage(\n **fiducials,\n hsp=np.concatenate([np.full((2, 3), 11.), np.full((2, 3), 12.),\n np.full((2, 3), 13.)]),\n hpi=np.concatenate([np.full((2, 3), 21.), np.full((2, 3), 22.),\n np.full((2, 3), 23.)]),\n ch_pos={\n 'h': [41, 41, 41], 'b': [42, 42, 42], 'g': [43, 43, 43],\n 'n': [51, 51, 51], 'y': [52, 52, 52], 'p': [53, 53, 53],\n 'v': [61, 61, 61], 'a': [62, 62, 62], 'l': [63, 63, 63],\n }\n )\n\n # Do some checks to ensure they are the same DigMontage\n assert len(montage.ch_names) == len(EXPECTED_MONTAGE.ch_names)\n assert all([c in montage.ch_names for c in EXPECTED_MONTAGE.ch_names])\n actual_occurrences = _count_points_by_type(montage.dig)\n expected_occurrences = _count_points_by_type(EXPECTED_MONTAGE.dig)\n assert actual_occurrences == expected_occurrences\n\n\ndef test_combining_digmontage_forbiden_behaviors():\n \"\"\"Test combining different DigMontage objects with repeated names.\"\"\"\n rng = np.random.RandomState(0)\n fiducials = dict(zip(('nasion', 'lpa', 'rpa'), rng.rand(3, 3)))\n dig1 = make_dig_montage(\n **fiducials,\n ch_pos=dict(zip(list('abc'), rng.rand(3, 3))),\n )\n dig2 = make_dig_montage(\n **fiducials,\n ch_pos=dict(zip(list('bcd'), rng.rand(3, 3))),\n )\n dig2_wrong_fid = make_dig_montage(\n nasion=rng.rand(3), lpa=rng.rand(3), rpa=rng.rand(3),\n ch_pos=dict(zip(list('ghi'), rng.rand(3, 3))),\n )\n dig2_wrong_coordframe = make_dig_montage(\n **fiducials,\n ch_pos=dict(zip(list('ghi'), rng.rand(3, 3))),\n coord_frame='meg'\n )\n\n EXPECTED_ERR_MSG = \"Cannot.*duplicated channel.*found: \\'b\\', \\'c\\'.\"\n with pytest.raises(RuntimeError, match=EXPECTED_ERR_MSG):\n _ = dig1 + dig2\n\n with pytest.raises(RuntimeError, match='fiducial locations do not match'):\n _ = dig1 + dig2_wrong_fid\n\n with pytest.raises(RuntimeError, match='not in the same coordinate '):\n _ = dig1 + dig2_wrong_coordframe\n\n\ndef test_set_dig_montage():\n \"\"\"Test setting DigMontage with toy understandable points.\"\"\"\n N_CHANNELS, N_HSP, N_HPI = 3, 2, 1\n ch_names = list(ascii_lowercase[:N_CHANNELS])\n ch_pos = dict(zip(\n ch_names,\n 
np.arange(N_CHANNELS * 3).reshape(N_CHANNELS, 3),\n ))\n\n montage_ch_only = make_dig_montage(ch_pos=ch_pos, coord_frame='head')\n\n assert repr(montage_ch_only) == (\n '<DigMontage | 0 extras (headshape), 0 HPIs, 0 fiducials, 3 channels>'\n )\n info = create_info(ch_names, sfreq=1, ch_types='eeg')\n info.set_montage(montage_ch_only)\n assert len(info['dig']) == len(montage_ch_only.dig)\n\n assert_allclose(actual=np.array([ch['loc'][:6] for ch in info['chs']]),\n desired=[[0., 1., 2., 0., 0., 0.],\n [3., 4., 5., 0., 0., 0.],\n [6., 7., 8., 0., 0., 0.]])\n\n montage_full = make_dig_montage(\n ch_pos=dict(**ch_pos, EEG000=np.full(3, 42)), # 4 = 3 egg + 1 eeg_ref\n nasion=[1, 1, 1], lpa=[2, 2, 2], rpa=[3, 3, 3],\n hsp=np.full((N_HSP, 3), 4),\n hpi=np.full((N_HPI, 3), 4),\n coord_frame='head'\n )\n\n assert repr(montage_full) == (\n '<DigMontage | 2 extras (headshape), 1 HPIs, 3 fiducials, 4 channels>'\n )\n\n info = create_info(ch_names, sfreq=1, ch_types='eeg')\n info.set_montage(montage_full)\n EXPECTED_LEN = sum({'hsp': 2, 'hpi': 1, 'fid': 3, 'eeg': 4}.values())\n assert len(info['dig']) == EXPECTED_LEN\n assert_allclose(actual=np.array([ch['loc'][:6] for ch in info['chs']]),\n desired=[[0., 1., 2., 42., 42., 42.],\n [3., 4., 5., 42., 42., 42.],\n [6., 7., 8., 42., 42., 42.]])\n\n\[email protected]_testing_data\ndef test_fif_dig_montage(tmpdir):\n \"\"\"Test FIF dig montage support.\"\"\"\n dig_montage = read_dig_fif(fif_dig_montage_fname)\n\n # test round-trip IO\n temp_dir = str(tmpdir)\n fname_temp = op.join(temp_dir, 'test.fif')\n _check_roundtrip(dig_montage, fname_temp)\n\n # Make a BrainVision file like the one the user would have had\n raw_bv = read_raw_brainvision(bv_fname, preload=True)\n raw_bv_2 = raw_bv.copy()\n mapping = dict()\n for ii, ch_name in enumerate(raw_bv.ch_names):\n mapping[ch_name] = 'EEG%03d' % (ii + 1,)\n raw_bv.rename_channels(mapping)\n for ii, ch_name in enumerate(raw_bv_2.ch_names):\n mapping[ch_name] = 'EEG%03d' % (ii + 33,)\n raw_bv_2.rename_channels(mapping)\n raw_bv.add_channels([raw_bv_2])\n for ch in raw_bv.info['chs']:\n ch['kind'] = FIFF.FIFFV_EEG_CH\n\n # Set the montage\n raw_bv.set_montage(dig_montage)\n\n # Check the result\n evoked = read_evokeds(evoked_fname)[0]\n\n # check info[chs] matches\n assert_equal(len(raw_bv.ch_names), len(evoked.ch_names) - 1)\n for ch_py, ch_c in zip(raw_bv.info['chs'], evoked.info['chs'][:-1]):\n assert_equal(ch_py['ch_name'],\n ch_c['ch_name'].replace('EEG ', 'EEG'))\n # C actually says it's unknown, but it's not (?):\n # assert_equal(ch_py['coord_frame'], ch_c['coord_frame'])\n assert_equal(ch_py['coord_frame'], FIFF.FIFFV_COORD_HEAD)\n c_loc = ch_c['loc'].copy()\n c_loc[c_loc == 0] = np.nan\n assert_allclose(ch_py['loc'], c_loc, atol=1e-7)\n\n # check info[dig]\n assert_dig_allclose(raw_bv.info, evoked.info)\n\n # Roundtrip of non-FIF start\n montage = make_dig_montage(hsp=read_polhemus_fastscan(hsp),\n hpi=read_mrk(hpi))\n elp_points = read_polhemus_fastscan(elp)\n ch_pos = {\"EEG%03d\" % (k + 1): pos for k, pos in enumerate(elp_points[8:])}\n montage += make_dig_montage(nasion=elp_points[0],\n lpa=elp_points[1],\n rpa=elp_points[2],\n ch_pos=ch_pos)\n _check_roundtrip(montage, fname_temp, 'unknown')\n montage = transform_to_head(montage)\n _check_roundtrip(montage, fname_temp)\n montage.dig[0]['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN\n with pytest.raises(RuntimeError, match='Only a single coordinate'):\n montage.save(fname_temp)\n\n\[email protected]_testing_data\ndef test_egi_dig_montage(tmpdir):\n 
\"\"\"Test EGI MFF XML dig montage support.\"\"\"\n dig_montage = read_dig_egi(egi_dig_montage_fname)\n fid, coord = _get_fid_coords(dig_montage.dig)\n\n assert coord == FIFF.FIFFV_COORD_UNKNOWN\n assert_allclose(\n actual=np.array([fid[key] for key in ['nasion', 'lpa', 'rpa']]),\n desired=[[ 0. , 10.564, -2.051], # noqa\n [-8.592, 0.498, -4.128], # noqa\n [ 8.592, 0.498, -4.128]], # noqa\n )\n\n # Test accuracy and embedding within raw object\n raw_egi = read_raw_egi(egi_raw_fname, channel_naming='EEG %03d')\n\n raw_egi.set_montage(dig_montage)\n test_raw_egi = read_raw_fif(egi_fif_fname)\n\n assert_equal(len(raw_egi.ch_names), len(test_raw_egi.ch_names))\n for ch_raw, ch_test_raw in zip(raw_egi.info['chs'],\n test_raw_egi.info['chs']):\n assert_equal(ch_raw['ch_name'], ch_test_raw['ch_name'])\n assert_equal(ch_raw['coord_frame'], FIFF.FIFFV_COORD_HEAD)\n assert_allclose(ch_raw['loc'], ch_test_raw['loc'], atol=1e-7)\n\n assert_dig_allclose(raw_egi.info, test_raw_egi.info)\n\n dig_montage_in_head = transform_to_head(dig_montage.copy())\n fid, coord = _get_fid_coords(dig_montage_in_head.dig)\n assert coord == FIFF.FIFFV_COORD_HEAD\n assert_allclose(\n actual=np.array([fid[key] for key in ['nasion', 'lpa', 'rpa']]),\n desired=[[0., 10.278, 0.], [-8.592, 0., 0.], [8.592, 0., 0.]],\n atol=1e-4,\n )\n\n # test round-trip IO\n fname_temp = tmpdir.join('egi_test.fif')\n _check_roundtrip(dig_montage, fname_temp, 'unknown')\n _check_roundtrip(dig_montage_in_head, fname_temp)\n\n\ndef _pop_montage(dig_montage, ch_name):\n # remove reference that was not used in old API\n name_idx = dig_montage.ch_names.index(ch_name)\n dig_idx = dig_montage._get_dig_names().index(ch_name)\n\n del dig_montage.dig[dig_idx]\n del dig_montage.ch_names[name_idx]\n for k in range(dig_idx, len(dig_montage.dig)):\n dig_montage.dig[k]['ident'] -= 1\n\n\[email protected]_testing_data\ndef test_read_dig_captrak(tmpdir):\n \"\"\"Test reading a captrak montage file.\"\"\"\n EXPECTED_CH_NAMES_OLD = [\n 'AF3', 'AF4', 'AF7', 'AF8', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'CP1',\n 'CP2', 'CP3', 'CP4', 'CP5', 'CP6', 'CPz', 'Cz', 'F1', 'F2', 'F3', 'F4',\n 'F5', 'F6', 'F7', 'F8', 'FC1', 'FC2', 'FC3', 'FC4', 'FC5', 'FC6',\n 'FT10', 'FT7', 'FT8', 'FT9', 'Fp1', 'Fp2', 'Fz', 'GND', 'O1', 'O2',\n 'Oz', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'PO10', 'PO3',\n 'PO4', 'PO7', 'PO8', 'PO9', 'POz', 'Pz', 'REF', 'T7', 'T8', 'TP10',\n 'TP7', 'TP8', 'TP9'\n ]\n EXPECTED_CH_NAMES = [\n 'T7', 'FC5', 'F7', 'C5', 'FT7', 'FT9', 'TP7', 'TP9', 'P7', 'CP5',\n 'PO7', 'C3', 'CP3', 'P5', 'P3', 'PO3', 'PO9', 'O1', 'Oz', 'POz', 'O2',\n 'PO4', 'P1', 'Pz', 'P2', 'CP2', 'CP1', 'CPz', 'Cz', 'C1', 'FC1', 'FC3',\n 'REF', 'F3', 'F1', 'Fz', 'F5', 'AF7', 'AF3', 'Fp1', 'GND', 'F2', 'AF4',\n 'Fp2', 'F4', 'F8', 'F6', 'AF8', 'FC2', 'FC6', 'FC4', 'C2', 'C4', 'P4',\n 'CP4', 'PO8', 'P8', 'P6', 'CP6', 'PO10', 'TP10', 'TP8', 'FT10', 'T8',\n 'C6', 'FT8'\n ]\n assert set(EXPECTED_CH_NAMES) == set(EXPECTED_CH_NAMES_OLD)\n montage = read_dig_captrak(\n fname=op.join(data_path, 'montage', 'captrak_coords.bvct')\n )\n\n assert montage.ch_names == EXPECTED_CH_NAMES\n assert repr(montage) == (\n '<DigMontage | '\n '0 extras (headshape), 0 HPIs, 3 fiducials, 66 channels>'\n )\n\n montage = transform_to_head(montage) # transform_to_head has to be tested\n _check_roundtrip(montage=montage, fname=str(tmpdir.join('bvct_test.fif')))\n\n fid, _ = _get_fid_coords(montage.dig)\n assert_allclose(\n actual=np.array([fid.nasion, fid.lpa, fid.rpa]),\n desired=[[0, 0.11309, 0], [-0.09189, 0, 0], 
[0.09240, 0, 0]],\n             atol=1e-5,\n        )\n\n    raw_bv = read_raw_brainvision(bv_raw_fname)\n    raw_bv.set_channel_types({\"HEOG\": 'eog', \"VEOG\": 'eog', \"ECG\": 'ecg'})\n\n    raw_bv.set_montage(montage)\n\n    test_raw_bv = read_raw_fif(bv_fif_fname)\n\n    # compare after set_montage using chs loc.\n    for actual, expected in zip(raw_bv.info['chs'], test_raw_bv.info['chs']):\n        assert_allclose(actual['loc'][:3], expected['loc'][:3])\n        if actual['kind'] == FIFF.FIFFV_EEG_CH:\n            assert_allclose(actual['loc'][3:6],\n                            [-0.005103, 0.05395, 0.144622], rtol=1e-04)\n\n\n# https://gist.github.com/larsoner/2264fb5895070d29a8c9aa7c0dc0e8a6\n_MGH60 = [\n    'Fz', 'F2', 'AF4', 'Fpz', 'Fp1', 'AF8', 'FT9', 'F7', 'FC5', 'FC6', 'FT7',\n    'F1', 'AF7', 'FT8', 'F6', 'F5', 'FC1', 'FC2', 'FT10', 'T9', 'Cz', 'F4',\n    'T7', 'C2', 'C4', 'C1', 'C3', 'F8', 'F3', 'C5', 'Fp2', 'AF3',\n    'CP2', 'P2', 'O2', 'Iz', 'Oz', 'PO4', 'O1', 'P8', 'PO8', 'P6', 'PO7', 'PO3', 'C6', 'TP9', 'TP8', 'CP4', 'P4',  # noqa\n    'CP3', 'CP1', 'TP7', 'P3', 'Pz', 'P1', 'P7', 'P5', 'TP10', 'T8', 'T10',\n]\n\n\[email protected]('rename', ('raw', 'montage', 'custom'))\ndef test_set_montage_mgh(rename):\n    \"\"\"Test setting 'mgh60' montage to old fif.\"\"\"\n    raw = read_raw_fif(fif_fname)\n    eeg_picks = pick_types(raw.info, meg=False, eeg=True, exclude=())\n    assert list(eeg_picks) == [ii for ii, name in enumerate(raw.ch_names)\n                               if name.startswith('EEG')]\n    orig_pos = np.array([raw.info['chs'][pick]['loc'][:3]\n                         for pick in eeg_picks])\n    atol = 1e-6\n    if rename == 'raw':\n        raw.rename_channels(lambda x: x.replace('EEG ', 'EEG'))\n        raw.set_montage('mgh60')  # test loading with string argument\n    elif rename == 'montage':\n        mon = make_standard_montage('mgh60')\n        mon.rename_channels(lambda x: x.replace('EEG', 'EEG '))\n        assert [raw.ch_names[pick] for pick in eeg_picks] == mon.ch_names\n        raw.set_montage(mon)\n    else:\n        atol = 3e-3  # XXX old defs here apparently (maybe not realistic)?\n        assert rename == 'custom'\n        assert len(_MGH60) == 60\n        mon = make_standard_montage('standard_1020')\n\n        def renamer(x):\n            try:\n                return 'EEG %03d' % (_MGH60.index(x) + 1,)\n            except ValueError:\n                return x\n\n        mon.rename_channels(renamer)\n        raw.set_montage(mon)\n\n    new_pos = np.array([ch['loc'][:3] for ch in raw.info['chs']\n                        if ch['ch_name'].startswith('EEG')])\n    assert ((orig_pos != new_pos).all())\n\n    r0 = _fit_sphere(new_pos)[1]\n    assert_allclose(r0, [0.000775, 0.006881, 0.047398], atol=1e-3)\n    # spot check\n    assert_allclose(new_pos[:2], [[0.000273, 0.084920, 0.105838],\n                                  [0.028822, 0.083529, 0.099164]], atol=atol)\n\n\n# XXX: this does not check ch_names + it cannot work because of write_dig\ndef _check_roundtrip(montage, fname, coord_frame='head'):\n    \"\"\"Check roundtrip writing.\"\"\"\n    montage.save(fname)\n    montage_read = read_dig_fif(fname=fname)\n\n    assert_equal(repr(montage), repr(montage_read))\n    assert_equal(_check_get_coord_frame(montage_read.dig), coord_frame)\n    assert_dig_allclose(montage, montage_read)\n\n\ndef _fake_montage(ch_names):\n    pos = np.random.RandomState(42).randn(len(ch_names), 3)\n    return make_dig_montage(ch_pos=dict(zip(ch_names, pos)),\n                            coord_frame='head')\n\n\ncnt_ignore_warns = [\n    pytest.mark.filterwarnings(\n        'ignore:.*Could not parse meas date from the header. Setting to None.'\n    ),\n    pytest.mark.filterwarnings((\n        'ignore:.*Could not define the number of bytes automatically.'\n        ' Defaulting to 2.')\n    ),\n]\n\n\ndef test_digmontage_constructor_errors():\n    \"\"\"Test proper error messaging.\"\"\"\n    with pytest.raises(ValueError, match='does not match the number'):\n        _ = DigMontage(ch_names=['foo', 'bar'], dig=list())\n\n\ndef test_transform_to_head_and_compute_dev_head_t():\n    \"\"\"Test transform_to_head and compute_dev_head_t.\"\"\"\n    EXPECTED_DEV_HEAD_T = \\\n        [[-3.72201691e-02, -9.98212167e-01, -4.67667497e-02, -7.31583414e-04],\n         [8.98064989e-01, -5.39382685e-02, 4.36543170e-01, 1.60134431e-02],\n         [-4.38285221e-01, -2.57513699e-02, 8.98466990e-01, 6.13035748e-02],\n         [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]\n\n    EXPECTED_FID_IN_POLHEMUS = {\n        'nasion': np.array([0.001393, 0.0131613, -0.0046967]),\n        'lpa': np.array([-0.0624997, -0.0737271, 0.07996]),\n        'rpa': np.array([-0.0748957, 0.0873785, 0.0811943]),\n    }\n\n    EXPECTED_FID_IN_HEAD = {\n        'nasion': np.array([-8.94466792e-18, 1.10559624e-01, -3.85185989e-34]),\n        'lpa': np.array([-8.10816716e-02, 6.56321671e-18, 0]),\n        'rpa': np.array([8.05048781e-02, -6.47441364e-18, 0]),\n    }\n\n    hpi_dev = np.array(\n        [[ 2.13951493e-02,  8.47444056e-02, -5.65431188e-02],  # noqa\n         [ 2.10299433e-02, -8.03141101e-02, -6.34420259e-02],  # noqa\n         [ 1.05916829e-01,  8.18485672e-05,  1.19928083e-02],  # noqa\n         [ 9.26595105e-02,  4.64804385e-02,  8.45141253e-03],  # noqa\n         [ 9.42554419e-02, -4.35206589e-02,  8.78999363e-03]]  # noqa\n    )\n\n    hpi_polhemus = np.array(\n        [[-0.0595004, -0.0704836,  0.075893 ],  # noqa\n         [-0.0646373,  0.0838228,  0.0762123],  # noqa\n         [-0.0135035,  0.0072522, -0.0268405],  # noqa\n         [-0.0202967, -0.0351498, -0.0129305],  # noqa\n         [-0.0277519,  0.0452628, -0.0222407]]  # noqa\n    )\n\n    montage_polhemus = make_dig_montage(\n        **EXPECTED_FID_IN_POLHEMUS, hpi=hpi_polhemus, coord_frame='unknown'\n    )\n\n    montage_meg = make_dig_montage(hpi=hpi_dev, coord_frame='meg')\n\n    # Test regular worflow to get dev_head_t\n    montage = montage_polhemus + montage_meg\n    fids, _ = _get_fid_coords(montage.dig)\n    for kk in fids:\n        assert_allclose(fids[kk], EXPECTED_FID_IN_POLHEMUS[kk], atol=1e-5)\n\n    with pytest.raises(ValueError, match='set to head coordinate system'):\n        _ = compute_dev_head_t(montage)\n\n    montage = transform_to_head(montage)\n\n    fids, _ = _get_fid_coords(montage.dig)\n    for kk in fids:\n        assert_allclose(fids[kk], EXPECTED_FID_IN_HEAD[kk], atol=1e-5)\n\n    dev_head_t = compute_dev_head_t(montage)\n    assert_allclose(dev_head_t['trans'], EXPECTED_DEV_HEAD_T, atol=5e-7)\n\n    # Test errors when number of HPI points do not match\n    EXPECTED_ERR_MSG = 'Device-to-Head .*Got 0 .*device and 5 points in head'\n    with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):\n        _ = compute_dev_head_t(transform_to_head(montage_polhemus))\n\n    EXPECTED_ERR_MSG = 'Device-to-Head .*Got 5 .*device and 0 points in head'\n    with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):\n        _ = compute_dev_head_t(transform_to_head(\n            montage_meg + make_dig_montage(**EXPECTED_FID_IN_POLHEMUS)\n        ))\n\n    EXPECTED_ERR_MSG = 'Device-to-Head .*Got 3 .*device and 5 points in head'\n    with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):\n        _ = compute_dev_head_t(transform_to_head(\n            DigMontage(dig=_format_dig_points(montage_meg.dig[:3])) +\n            montage_polhemus\n        ))\n\n\ndef test_set_montage_with_mismatching_ch_names():\n    \"\"\"Test setting a DigMontage with mismatching ch_names.\"\"\"\n    raw = read_raw_fif(fif_fname)\n    montage = make_standard_montage('mgh60')\n\n    # 'EEG 001' and 'EEG001' won't match\n    missing_err = '60 channel positions not present'\n    with pytest.raises(ValueError, match=missing_err):\n        raw.set_montage(montage)\n\n    montage.ch_names = [  # modify the names in place\n        name.replace('EEG', 'EEG ') for name in montage.ch_names\n    ]\n    raw.set_montage(montage)  # does not raise\n\n    # Case sensitivity\n    raw.rename_channels(lambda x: x.lower())\n    with pytest.raises(ValueError, match=missing_err):\n        raw.set_montage(montage)\n    # should work\n    raw.set_montage(montage, match_case=False)\n    raw.rename_channels(lambda x: x.upper())  # restore\n    assert 'EEG 001' in raw.ch_names and 'eeg 001' not in raw.ch_names\n    raw.rename_channels({'EEG 002': 'eeg 001'})\n    assert 'EEG 001' in raw.ch_names and 'eeg 001' in raw.ch_names\n    raw.set_channel_types({'eeg 001': 'misc'})\n    raw.set_montage(montage)\n    raw.set_channel_types({'eeg 001': 'eeg'})\n    with pytest.raises(ValueError, match='1 channel position not present'):\n        raw.set_montage(montage)\n    with pytest.raises(ValueError, match='match_case=False as 1 channel name'):\n        raw.set_montage(montage, match_case=False)\n    info = create_info(['EEG 001'], 1000., 'eeg')\n    mon = make_dig_montage({'EEG 001': np.zeros(3), 'eeg 001': np.zeros(3)},\n                           nasion=[0, 1., 0], rpa=[1., 0, 0], lpa=[-1., 0, 0])\n    info.set_montage(mon)\n    with pytest.raises(ValueError, match='match_case=False as 1 montage name'):\n        info.set_montage(mon, match_case=False)\n\n\ndef test_set_montage_with_sub_super_set_of_ch_names():\n    \"\"\"Test info and montage ch_names matching criteria.\"\"\"\n    N_CHANNELS = len('abcdef')\n    montage = _make_toy_dig_montage(N_CHANNELS, coord_frame='head')\n\n    # montage and info match\n    info = create_info(ch_names=list('abcdef'), sfreq=1, ch_types='eeg')\n    info.set_montage(montage)\n\n    # montage is a SUPERset of info\n    info = create_info(list('abc'), sfreq=1, ch_types='eeg')\n    info.set_montage(montage)\n    assert len(info['dig']) == len(list('abc'))\n\n    # montage is a SUBset of info\n    _MSG = 'subset of info. There are 2 .* not present in the DigMontage'\n    info = create_info(ch_names=list('abcdfgh'), sfreq=1, ch_types='eeg')\n    with pytest.raises(ValueError, match=_MSG) as exc:\n        info.set_montage(montage)\n    # plus suggestions\n    assert exc.match('set_channel_types')\n    assert exc.match('on_missing')\n\n\ndef test_heterogeneous_ch_type():\n    \"\"\"Test ch_names matching criteria with heterogeneous ch_type.\"\"\"\n    VALID_MONTAGE_NAMED_CHS = ('eeg', 'ecog', 'seeg')\n\n    montage = _make_toy_dig_montage(\n        n_channels=len(VALID_MONTAGE_NAMED_CHS),\n        coord_frame='head',\n    )\n\n    # Montage and info match\n    info = create_info(montage.ch_names, 1., list(VALID_MONTAGE_NAMED_CHS))\n    RawArray(np.zeros((3, 1)), info, copy=None).set_montage(montage)\n\n\ndef test_set_montage_coord_frame_in_head_vs_unknown():\n    \"\"\"Test set montage using head and unknown only.\"\"\"\n    N_CHANNELS, NaN = 3, np.nan\n\n    raw = _make_toy_raw(N_CHANNELS)\n    montage_in_head = _make_toy_dig_montage(N_CHANNELS, coord_frame='head')\n    montage_in_unknown = _make_toy_dig_montage(\n        N_CHANNELS, coord_frame='unknown'\n    )\n    montage_in_unknown_with_fid = _make_toy_dig_montage(\n        N_CHANNELS, coord_frame='unknown',\n        nasion=[0, 1, 0], lpa=[1, 0, 0], rpa=[-1, 0, 0],\n    )\n\n    assert_allclose(\n        actual=np.array([ch['loc'] for ch in raw.info['chs']]),\n        desired=np.full((N_CHANNELS, 12), np.nan)\n    )\n\n    raw.set_montage(montage_in_head)\n    assert_allclose(\n        actual=np.array([ch['loc'] for ch in raw.info['chs']]),\n        desired=[\n            [0., 1., 2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n            [3., 4., 5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n            [6., 7., 8., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n        ]\n    )\n\n    with pytest.warns(RuntimeWarning, match='assuming identity'):\n        raw.set_montage(montage_in_unknown)\n\n    raw.set_montage(montage_in_unknown_with_fid)\n    assert_allclose(\n        actual=np.array([ch['loc'] for ch in raw.info['chs']]),\n        desired=[\n            [-0., 1., -2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n            [-3., 4., -5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n            [-6., 7., -8., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n        ]\n    )\n\n    # check no collateral effects from transforming montage\n    assert _check_get_coord_frame(montage_in_unknown_with_fid.dig) == 'unknown'\n    assert_array_equal(\n        _get_dig_montage_pos(montage_in_unknown_with_fid),\n        [[0, 1, 2], [3, 4, 5], [6, 7, 8]],\n    )\n\n\ndef test_set_montage_with_missing_coordinates():\n    \"\"\"Test set montage with missing coordinates.\"\"\"\n    N_CHANNELS, NaN = 3, np.nan\n\n    raw = _make_toy_raw(N_CHANNELS)\n    raw.set_channel_types({ch: 'ecog' for ch in raw.ch_names})\n    # don't include all the channels\n    ch_names = raw.ch_names[1:]\n    n_channels = len(ch_names)\n    ch_coords = np.arange(n_channels * 3).reshape(n_channels, 3)\n    montage_in_mri = make_dig_montage(\n        ch_pos=dict(zip(ch_names, ch_coords,)),\n        coord_frame='unknown',\n        nasion=[0, 1, 0], lpa=[1, 0, 0], rpa=[-1, 0, 0],\n    )\n\n    with pytest.raises(ValueError, match='DigMontage is '\n                                         'only a subset of info'):\n        raw.set_montage(montage_in_mri)\n\n    with pytest.raises(ValueError, match='Invalid value'):\n        raw.set_montage(montage_in_mri, on_missing='foo')\n\n    with pytest.raises(TypeError, match='must be an instance'):\n        raw.set_montage(montage_in_mri, on_missing=True)\n\n    with pytest.warns(RuntimeWarning, match='DigMontage is '\n                                            'only a subset of info'):\n        raw.set_montage(montage_in_mri, on_missing='warn')\n\n    raw.set_montage(montage_in_mri, on_missing='ignore')\n    assert_allclose(\n        actual=np.array([ch['loc'] for ch in raw.info['chs']]),\n        desired=[\n            [NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN],\n            [0., 1., -2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n            [-3., 4., -5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n        ]\n    )\n\n\[email protected]_testing_data\ndef test_get_montage():\n    \"\"\"Test get montage from Instance.\n\n    Test with standard montage and then loaded in montage.\n    \"\"\"\n    # 1. read in testing data and assert montage roundtrip\n    # for testing dataset: 'test_raw.fif'\n    raw = read_raw_fif(fif_fname)\n    raw = raw.rename_channels(lambda name: name.replace('EEG ', 'EEG'))\n    raw2 = raw.copy()\n    # get montage and then set montage and\n    # it should be the same\n    montage = raw.get_montage()\n    raw.set_montage(montage, on_missing='raise')\n    test_montage = raw.get_montage()\n    assert_object_equal(raw.info['chs'], raw2.info['chs'])\n    assert_dig_allclose(raw2.info, raw.info)\n    assert_object_equal(raw2.info['dig'], raw.info['dig'])\n\n    # the montage does not change\n    assert_object_equal(montage.dig, test_montage.dig)\n\n    # the montage should fulfill a roundtrip with make_dig_montage\n    test2_montage = make_dig_montage(**montage.get_positions())\n    assert_object_equal(test2_montage.dig, test_montage.dig)\n\n    # 2. now do a standard montage\n    montage = make_standard_montage('mgh60')\n    # set the montage; note renaming to make standard montage map\n    raw.set_montage(montage)\n\n    # get montage back and set it\n    # the channel locations should be the same\n    raw2 = raw.copy()\n    test_montage = raw.get_montage()\n    raw.set_montage(test_montage, on_missing='ignore')\n\n    # the montage should fulfill a roundtrip with make_dig_montage\n    test2_montage = make_dig_montage(**test_montage.get_positions())\n    assert_object_equal(test2_montage.dig, test_montage.dig)\n\n    # chs should not change\n    assert_object_equal(raw2.info['chs'], raw.info['chs'])\n    # dig order might be different after set_montage\n    assert montage.ch_names == test_montage.ch_names\n    # note that test_montage will have different coordinate frame\n    # compared to standard montage\n    assert_dig_allclose(raw2.info, raw.info)\n    assert_object_equal(raw2.info['dig'], raw.info['dig'])\n\n    # 3. if montage gets set to None\n    raw.set_montage(None)\n    assert raw.get_montage() is None\n\n    # 4. read in BV test dataset and make sure montage\n    # fulfills roundtrip on non-standard montage\n    dig_montage = read_dig_fif(fif_dig_montage_fname)\n\n    # Make a BrainVision file like the one the user would have had\n    # with testing dataset 'test.vhdr'\n    raw_bv = read_raw_brainvision(bv_fname, preload=True)\n    raw_bv_2 = raw_bv.copy()\n\n    # rename channels to make it have the full set\n    # of channels\n    mapping = dict()\n    for ii, ch_name in enumerate(raw_bv.ch_names):\n        mapping[ch_name] = 'EEG%03d' % (ii + 1,)\n    raw_bv.rename_channels(mapping)\n    for ii, ch_name in enumerate(raw_bv_2.ch_names):\n        mapping[ch_name] = 'EEG%03d' % (ii + 33,)\n    raw_bv_2.rename_channels(mapping)\n    raw_bv.add_channels([raw_bv_2])\n    for ch in raw_bv.info['chs']:\n        ch['kind'] = FIFF.FIFFV_EEG_CH\n\n    # Set the montage and roundtrip\n    raw_bv.set_montage(dig_montage)\n    raw_bv2 = raw_bv.copy()\n\n    # reset the montage\n    test_montage = raw_bv.get_montage()\n    raw_bv.set_montage(test_montage, on_missing='ignore')\n    # dig order might be different after set_montage\n    assert_object_equal(raw_bv2.info['dig'], raw_bv.info['dig'])\n    assert_dig_allclose(raw_bv2.info, raw_bv.info)\n\n    # if dig is not set in the info, then montage returns None\n    raw.info['dig'] = None\n    assert raw.get_montage() is None\n\n    # the montage should fulfill a roundtrip with make_dig_montage\n    test2_montage = make_dig_montage(**test_montage.get_positions())\n    assert_object_equal(test2_montage.dig, test_montage.dig)\n\n\ndef test_read_dig_hpts():\n    \"\"\"Test reading .hpts file (from MNE legacy).\"\"\"\n    fname = op.join(\n        op.dirname(_BRAINVISON_FILE), 'tests', 'data', 'test.hpts'\n    )\n\n    montage = read_dig_hpts(fname)\n    assert repr(montage) == (\n        '<DigMontage | '\n        '0 extras (headshape), 5 HPIs, 3 fiducials, 34 channels>'\n    )\n\n\ndef test_get_builtin_montages():\n    \"\"\"Test help function to obtain builtin montages.\"\"\"\n    EXPECTED_NUM = 24\n    assert len(get_builtin_montages()) == EXPECTED_NUM\n\n\[email protected]_testing_data\ndef test_plot_montage():\n    \"\"\"Test plotting montage.\"\"\"\n    # gh-8025\n    montage = read_dig_captrak(bvct_dig_montage_fname)\n    montage.plot()\n    plt.close('all')\n\n\nrun_tests_if_main()\n"
] | [
[
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.eye",
"numpy.empty",
"numpy.stack",
"numpy.testing.assert_array_equal",
"numpy.full",
"matplotlib.pyplot.close",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NYC00kie/PhysWikiQuiz | [
"4243fd6fa6f23670b9743b6a2c79339a9f3d32fc"
] | [
"module_unit_tests.py"
] | [
"import unittest\n\nfrom old import module0_formula_and_identifier_retrieval\n\nimport pandas as pd\n\n# Python Tutorial: Unit Testing Your Code with the unittest Module:\n#https://www.youtube.com/watch?v=6tNS--WetLI\n\n# Retrieve sample QIDs\nsample_IDs_filepath = r'evaluation\\sample_IDs.csv'\nQIDs_column_name = 'QID'\n\ndef get_sample_QIDs():\n sample_IDs_table = pd.read_csv(sample_IDs_filepath,delimiter=';')\n sample_QIDs = list(sample_IDs_table[QIDs_column_name])\n return sample_QIDs\n\nclass TestModules(unittest.TestCase):\n\n # TEST MODULE0\n def test_module0(self):\n #qid = 'Q11376'\n #sample_QIDs = [qid]\n sample_QIDs = get_sample_QIDs()\n for qid in sample_QIDs:\n Wikidata_item = module0_formula_and_identifier_retrieval \\\n .get_Wikidata_item(qid)\n\n self.assertIsNotNone(Wikidata_item)\n #self.assertIsNotNone(module0_formula_and_identifier_retrieval\n # .get_concept_name(Wikidata_item))\n\n # TEST MODULE1\n def test_module1(self):\n # TODO: insert code for unit test of module1 here\n pass\n\nif __name__ == '__main__':\n unittest.main()"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
bertelschmitt/multistreamYOLO | [
"827a1d2ae11653fe5fde2cee3b52cda8baae9899"
] | [
"2_Training/Train_YOLO.py"
] | [
"\n\"\"\"\nMODIFIED FROM keras-yolo3 PACKAGE, https://github.com/qqwweee/keras-yolo3\nRetrain the YOLO model for your own dataset.\n\n10-26-20 MODIFIED by bertelschmitt to use new repo name if changed to something else than \"TrainYourOwnYOLO\"\n10-31-20 UPDATED by bertelschmitt to reflect TrainYourOwnYOLO versions as of 10-31-20\n\"\"\"\n\nimport os\nimport sys\nimport argparse\nimport warnings\n\n\ndef get_parent_dir(n=1):\n    \"\"\"returns the n-th parent dicrectory of the current\n    working directory\"\"\"\n    current_path = os.path.dirname(os.path.abspath(__file__))\n    for _ in range(n):\n        current_path = os.path.dirname(current_path)\n    return current_path\n\n\nsrc_path = os.path.join(get_parent_dir(0), \"src\")\nsys.path.append(src_path)\n\nutils_path = os.path.join(get_parent_dir(1), \"Utils\")\nsys.path.append(utils_path)\n\nimport numpy as np\nimport keras.backend as K\nfrom keras.layers import Input, Lambda\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.callbacks import (\n    TensorBoard,\n    ModelCheckpoint,\n    ReduceLROnPlateau,\n    EarlyStopping,\n)\nfrom keras_yolo3.yolo3.model import (\n    preprocess_true_boxes,\n    yolo_body,\n    tiny_yolo_body,\n    yolo_loss,\n)\nfrom keras_yolo3.yolo3.utils import get_random_data\nfrom PIL import Image\nfrom time import time\nimport tensorflow.compat.v1 as tf\nimport pickle\n\nfrom Train_Utils import (\n    get_classes,\n    get_anchors,\n    create_model,\n    create_tiny_model,\n    data_generator,\n    data_generator_wrapper,\n    ChangeToOtherMachine,\n)\n\n\nkeras_path = os.path.join(src_path, \"keras_yolo3\")\nData_Folder = os.path.join(get_parent_dir(1), \"Data\")\nImage_Folder = os.path.join(Data_Folder, \"Source_Images\", \"Training_Images\")\nVoTT_Folder = os.path.join(Image_Folder, \"vott-csv-export\")\nYOLO_filename = os.path.join(VoTT_Folder, \"data_train.txt\")\n\nModel_Folder = os.path.join(Data_Folder, \"Model_Weights\")\nYOLO_classname = os.path.join(Model_Folder, \"data_classes.txt\")\n\nlog_dir = Model_Folder\nanchors_path = os.path.join(keras_path, \"model_data\", \"yolo_anchors.txt\")\nweights_path = os.path.join(keras_path, \"yolo.h5\")\n#10-26-20 get name of current repo, which should be the directory one down from ours\ncurrent_repo = get_parent_dir(1).rsplit('/', 1)[1]\nFLAGS = None\n\nif __name__ == \"__main__\":\n    # Delete all default flags\n    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)\n    \"\"\"\n    Command line options\n    \"\"\"\n\n    parser.add_argument(\n        \"--annotation_file\",\n        type=str,\n        default=YOLO_filename,\n        help=\"Path to annotation file for Yolo. Default is \" + YOLO_filename,\n    )\n    parser.add_argument(\n        \"--classes_file\",\n        type=str,\n        default=YOLO_classname,\n        help=\"Path to YOLO classnames. Default is \" + YOLO_classname,\n    )\n\n    parser.add_argument(\n        \"--log_dir\",\n        type=str,\n        default=log_dir,\n        help=\"Folder to save training logs and trained weights to. Default is \"\n        + log_dir,\n    )\n\n    parser.add_argument(\n        \"--anchors_path\",\n        type=str,\n        default=anchors_path,\n        help=\"Path to YOLO anchors. Default is \" + anchors_path,\n    )\n\n    parser.add_argument(\n        \"--weights_path\",\n        type=str,\n        default=weights_path,\n        help=\"Path to pre-trained YOLO weights. Default is \" + weights_path,\n    )\n    parser.add_argument(\n        \"--val_split\",\n        type=float,\n        default=0.1,\n        help=\"Percentage of training set to be used for validation. Default is 10%.\",\n    )\n    parser.add_argument(\n        \"--is_tiny\",\n        default=False,\n        action=\"store_true\",\n        help=\"Use the tiny Yolo version for better performance and less accuracy. Default is False.\",\n    )\n    parser.add_argument(\n        \"--random_seed\",\n        type=float,\n        default=None,\n        help=\"Random seed value to make script deterministic. Default is 'None', i.e. non-deterministic.\",\n    )\n    parser.add_argument(\n        \"--epochs\",\n        type=int,\n        default=51,\n        help=\"Number of epochs for training last layers and number of epochs for fine-tuning layers. Default is 51.\",\n    )\n    parser.add_argument(\n        \"--warnings\",\n        default=False,\n        action=\"store_true\",\n        help=\"Display warning messages. Default is False.\",\n    )\n\n    FLAGS = parser.parse_args()\n\n    if not FLAGS.warnings:\n        tf.logging.set_verbosity(tf.logging.ERROR)\n        os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n        warnings.filterwarnings(\"ignore\")\n\n    #Backported w/o change 10/31/20 from TrainYourOwnYOLO version as of 10/31/20 by BS\n\t# Get WandB integration if setup\n    try:\n        import wandb\n        from wandb.integration.keras import WandbCallback  # type: ignore\n\n        wandb.ensure_configured()\n        if wandb.api.api_key is None:\n            _has_wandb = False\n            wandb.termwarn(\n                \"W&B installed but not logged in. Run `wandb login` or set the WANDB_API_KEY env variable.\"\n            )\n        else:\n            _has_wandb = False if os.getenv(\"WANDB_DISABLED\") else True\n    except (ImportError, AttributeError):\n        _has_wandb = False\n\t\t\t\t\t\t\t\t\n    np.random.seed(FLAGS.random_seed)\n\n    log_dir = FLAGS.log_dir\n\n    class_names = get_classes(FLAGS.classes_file)\n    num_classes = len(class_names)\n\n    if FLAGS.is_tiny and FLAGS.weights_path == weights_path:\n        weights_path = os.path.join(os.path.dirname(FLAGS.weights_path), \"yolo-tiny.h5\")\n    if FLAGS.is_tiny and FLAGS.anchors_path == anchors_path:\n        anchors_path = os.path.join(\n            os.path.dirname(FLAGS.anchors_path), \"yolo-tiny_anchors.txt\"\n        )\n    anchors = get_anchors(anchors_path)\n\n    input_shape = (416, 416)  # multiple of 32, height, width\n    epoch1, epoch2 = FLAGS.epochs, FLAGS.epochs\n\n    is_tiny_version = len(anchors) == 6  # default setting\n    if FLAGS.is_tiny:\n        model = create_tiny_model(\n            input_shape, anchors, num_classes, freeze_body=2, weights_path=weights_path\n        )\n    else:\n        model = create_model(\n            input_shape, anchors, num_classes, freeze_body=2, weights_path=weights_path\n        )  # make sure you know what you freeze\n\n    log_dir_time = os.path.join(log_dir, \"{}\".format(int(time())))\n    logging = TensorBoard(log_dir=log_dir_time)\n    checkpoint = ModelCheckpoint(\n        os.path.join(log_dir, \"checkpoint.h5\"),\n        monitor=\"val_loss\",\n        save_weights_only=True,\n        save_best_only=True,\n        period=5,\n    )\n    reduce_lr = ReduceLROnPlateau(monitor=\"val_loss\", factor=0.1, patience=3, verbose=1)\n    early_stopping = EarlyStopping(\n        monitor=\"val_loss\", min_delta=0, patience=10, verbose=1\n    )\n\n    val_split = FLAGS.val_split\n    with open(FLAGS.annotation_file) as f:\n        lines = f.readlines()\n\n    # This step makes sure that the path names correspond to the local machine\n    # This is important if annotation and training are done on different machines (e.g. training on AWS)\n\t# 10-26-20 Changed by bertelschmitt to call with current_repo\n    lines = ChangeToOtherMachine(lines, remote_machine=\"\", repo=current_repo)\n    np.random.shuffle(lines)\n    num_val = int(len(lines) * val_split)\n    num_train = len(lines) - num_val\n\t\n\t# From here on down, all backported w/o change 10/31/20 from TrainYourOwnYOLO version as of 10/31/20 by BS \n    # Train with frozen layers first, to get a stable loss.\n    # Adjust num epochs to your dataset. This step is enough to obtain a decent model.\n    frozen_callbacks = [logging, checkpoint]\n\n    if _has_wandb:\n        wandb.init(\n            project=\"TrainYourOwnYOLO\", config=vars(FLAGS), sync_tensorboard=False\n        )\n        wandb_callback = WandbCallback(save_model=False)\n        frozen_callbacks.append(wandb_callback)\n\n    model.compile(\n        optimizer=Adam(lr=1e-3),\n        loss={\n            # use custom yolo_loss Lambda layer.\n            \"yolo_loss\": lambda y_true, y_pred: y_pred\n        },\n    )\n\n    batch_size = 32\n    print(\n        \"Train on {} samples, val on {} samples, with batch size {}.\".format(\n            num_train, num_val, batch_size\n        )\n    )\n    history = model.fit_generator(\n        data_generator_wrapper(\n            lines[:num_train], batch_size, input_shape, anchors, num_classes\n        ),\n        steps_per_epoch=max(1, num_train // batch_size),\n        validation_data=data_generator_wrapper(\n            lines[num_train:], batch_size, input_shape, anchors, num_classes\n        ),\n        validation_steps=max(1, num_val // batch_size),\n        epochs=epoch1,\n        initial_epoch=0,\n        callbacks=frozen_callbacks,\n    )\n    model.save_weights(os.path.join(log_dir, \"trained_weights_stage_1.h5\"))\n\n    # Unfreeze and continue training, to fine-tune.\n    # Train longer if the result is unsatisfactory.\n\n    full_callbacks = [logging, checkpoint, reduce_lr, early_stopping]\n\n    if _has_wandb:\n        full_callbacks.append(wandb_callback)\n\n    for i in range(len(model.layers)):\n        model.layers[i].trainable = True\n    model.compile(\n        optimizer=Adam(lr=1e-4), loss={\"yolo_loss\": lambda y_true, y_pred: y_pred}\n    )  # recompile to apply the change\n\n    print(\"Unfreeze all layers.\")\n\n    batch_size = 4  # note that more GPU memory is required after unfreezing the body\n    print(\n        \"Train on {} samples, val on {} samples, with batch size {}.\".format(\n            num_train, num_val, batch_size\n        )\n    )\n    history = model.fit_generator(\n        data_generator_wrapper(\n            lines[:num_train], batch_size, input_shape, anchors, num_classes\n        ),\n        steps_per_epoch=max(1, num_train // batch_size),\n        validation_data=data_generator_wrapper(\n            lines[num_train:], batch_size, input_shape, anchors, num_classes\n        ),\n        validation_steps=max(1, num_val // batch_size),\n        epochs=epoch1 + epoch2,\n        initial_epoch=epoch1,\n        callbacks=full_callbacks,\n    )\n    model.save_weights(os.path.join(log_dir, \"trained_weights_final.h5\"))\n"
] | [
[
"numpy.random.shuffle",
"numpy.random.seed",
"tensorflow.compat.v1.logging.set_verbosity"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
limeng357/Paddle | [
"dbd25805c88c48998eb9dc0f4b2ca1fd46326482",
"dbd25805c88c48998eb9dc0f4b2ca1fd46326482",
"dbd25805c88c48998eb9dc0f4b2ca1fd46326482",
"dbd25805c88c48998eb9dc0f4b2ca1fd46326482",
"dbd25805c88c48998eb9dc0f4b2ca1fd46326482",
"dbd25805c88c48998eb9dc0f4b2ca1fd46326482",
"dbd25805c88c48998eb9dc0f4b2ca1fd46326482",
"dbd25805c88c48998eb9dc0f4b2ca1fd46326482",
"dbd25805c88c48998eb9dc0f4b2ca1fd46326482"
] | [
"python/paddle/fluid/tests/unittests/test_bipartite_match_op.py",
"python/paddle/fluid/tests/unittests/test_pool2d_op.py",
"python/paddle/fluid/data_feeder.py",
"python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py",
"python/paddle/fluid/tests/unittests/test_parallel_op.py",
"python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py",
"python/paddle/fluid/tests/unittests/test_multiplex_op.py",
"python/paddle/fluid/tests/unittests/test_sign_op.py",
"python/paddle/fluid/tests/unittests/test_random_crop_op.py"
] | [
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\n\n\ndef bipartite_match(distance, match_indices, match_dist):\n    \"\"\"Bipartite Matching algorithm.\n    Arg:\n        distance (numpy.array) : The distance of two entries with shape [M, N].\n        match_indices (numpy.array): the matched indices from column to row\n            with shape [1, N], it must be initialized to -1.\n        match_dist (numpy.array): The matched distance from column to row\n            with shape [1, N], it must be initialized to 0.\n    \"\"\"\n    match_pair = []\n    row, col = distance.shape\n    for i in range(row):\n        for j in range(col):\n            match_pair.append((i, j, distance[i][j]))\n\n    match_sorted = sorted(match_pair, key=lambda tup: tup[2], reverse=True)\n\n    row_indices = -1 * np.ones((row, ), dtype=np.int)\n\n    idx = 0\n    for i, j, dist in match_sorted:\n        if idx >= row:\n            break\n        if match_indices[j] == -1 and row_indices[i] == -1 and dist > 0:\n            match_indices[j] = i\n            row_indices[i] = j\n            match_dist[j] = dist\n            idx += 1\n\n\ndef argmax_match(distance, match_indices, match_dist, threshold):\n    r, c = distance.shape\n    for j in xrange(c):\n        if match_indices[j] != -1:\n            continue\n        col_dist = distance[:, j]\n        indices = np.argwhere(col_dist >= threshold).flatten()\n        if len(indices) < 1:\n            continue\n        match_indices[j] = indices[np.argmax(col_dist[indices])]\n        match_dist[j] = col_dist[match_indices[j]]\n\n\ndef batch_bipartite_match(distance, lod, match_type=None, dist_threshold=None):\n    \"\"\"Bipartite Matching algorithm for batch input.\n    Arg:\n        distance (numpy.array) : The distance of two entries with shape [M, N].\n        lod (list of int): The offsets of each input in this batch.\n    \"\"\"\n    n = len(lod) - 1\n    m = distance.shape[1]\n    match_indices = -1 * np.ones((n, m), dtype=np.int)\n    match_dist = np.zeros((n, m), dtype=np.float32)\n    for i in range(len(lod) - 1):\n        bipartite_match(distance[lod[i]:lod[i + 1], :], match_indices[i, :],\n                        match_dist[i, :])\n        if match_type == 'per_prediction':\n            argmax_match(distance[lod[i]:lod[i + 1], :], match_indices[i, :],\n                         match_dist[i, :], dist_threshold)\n    return match_indices, match_dist\n\n\nclass TestBipartiteMatchOpWithLoD(OpTest):\n    def setUp(self):\n        self.op_type = 'bipartite_match'\n        lod = [[0, 5, 11, 23]]\n        dist = np.random.random((23, 217)).astype('float32')\n        match_indices, match_dist = batch_bipartite_match(dist, lod[0])\n\n        self.inputs = {'DistMat': (dist, lod)}\n        self.outputs = {\n            'ColToRowMatchIndices': match_indices,\n            'ColToRowMatchDist': match_dist,\n        }\n\n    def test_check_output(self):\n        self.check_output()\n\n\nclass TestBipartiteMatchOpWithoutLoD(OpTest):\n    def setUp(self):\n        self.op_type = 'bipartite_match'\n        lod = [[0, 8]]\n        dist = np.random.random((8, 17)).astype('float32')\n        match_indices, match_dist = batch_bipartite_match(dist, lod[0])\n\n        self.inputs = {'DistMat': dist}\n        self.outputs = {\n            'ColToRowMatchIndices': match_indices,\n            'ColToRowMatchDist': match_dist,\n        }\n\n    def test_check_output(self):\n        self.check_output()\n\n\nclass TestBipartiteMatchOpWithPerPredictionType(OpTest):\n    def setUp(self):\n        self.op_type = 'bipartite_match'\n        lod = [[0, 5, 11, 23]]\n        dist = np.random.random((23, 237)).astype('float32')\n        match_indices, match_dist = batch_bipartite_match(dist, lod[0],\n                                                          'per_prediction', 0.5)\n\n        self.inputs = {'DistMat': (dist, lod)}\n        self.outputs = {\n            'ColToRowMatchIndices': match_indices,\n            'ColToRowMatchDist': match_dist,\n        }\n        self.attrs = {\n            'match_type': 'per_prediction',\n            'dist_threshold': 0.5,\n        }\n\n    def test_check_output(self):\n        self.check_output()\n\n\nif __name__ == '__main__':\n    unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\n\nimport paddle.fluid.core as core\nfrom op_test import OpTest\n\n\ndef max_pool2D_forward_naive(x,\n                             ksize,\n                             strides,\n                             paddings,\n                             global_pool=0,\n                             ceil_mode=False):\n    N, C, H, W = x.shape\n    if global_pool == 1:\n        ksize = [H, W]\n    H_out = (H - ksize[0] + 2 * paddings[0] + strides[0] - 1\n             ) / strides[0] + 1 if ceil_mode else (H - ksize[0] + 2 *\n                                                   paddings[0]) / strides[0] + 1\n    W_out = (W - ksize[1] + 2 * paddings[1] + strides[1] - 1\n             ) / strides[1] + 1 if ceil_mode else (W - ksize[1] + 2 *\n                                                   paddings[1]) / strides[1] + 1\n    out = np.zeros((N, C, H_out, W_out))\n    for i in xrange(H_out):\n        for j in xrange(W_out):\n            r_start = np.max((i * strides[0] - paddings[0], 0))\n            r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H))\n            c_start = np.max((j * strides[1] - paddings[1], 0))\n            c_end = np.min((j * strides[1] + ksize[1] - paddings[1], W))\n            x_masked = x[:, :, r_start:r_end, c_start:c_end]\n\n            out[:, :, i, j] = np.max(x_masked, axis=(2, 3))\n    return out\n\n\ndef avg_pool2D_forward_naive(x,\n                             ksize,\n                             strides,\n                             paddings,\n                             global_pool=0,\n                             ceil_mode=False):\n    N, C, H, W = x.shape\n    if global_pool == 1:\n        ksize = [H, W]\n    H_out = (H - ksize[0] + 2 * paddings[0] + strides[0] - 1\n             ) / strides[0] + 1 if ceil_mode else (H - ksize[0] + 2 *\n                                                   paddings[0]) / strides[0] + 1\n    W_out = (W - ksize[1] + 2 * paddings[1] + strides[1] - 1\n             ) / strides[1] + 1 if ceil_mode else (W - ksize[1] + 2 *\n                                                   paddings[1]) / strides[1] + 1\n    out = np.zeros((N, C, H_out, W_out))\n    for i in xrange(H_out):\n        for j in xrange(W_out):\n            r_start = np.max((i * strides[0] - paddings[0], 0))\n            r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H))\n            c_start = np.max((j * strides[1] - paddings[1], 0))\n            c_end = np.min((j * strides[1] + ksize[1] - paddings[1], W))\n            x_masked = x[:, :, r_start:r_end, c_start:c_end]\n\n            out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / (\n                (r_end - r_start) * (c_end - c_start))\n    return out\n\n\nclass TestPool2d_Op(OpTest):\n    def setUp(self):\n        self.op_type = \"pool2d\"\n        self.use_cudnn = False\n        self.use_mkldnn = False\n        self.dtype = np.float32\n        self.init_test_case()\n        self.init_global_pool()\n        self.init_kernel_type()\n        self.init_pool_type()\n        self.init_ceil_mode()\n        if self.global_pool:\n            self.paddings = [0 for _ in range(len(self.paddings))]\n        input = np.random.random(self.shape).astype(self.dtype)\n        output = self.pool2D_forward_naive(input, self.ksize, self.strides,\n                                           self.paddings, self.global_pool,\n                                           self.ceil_mode).astype(self.dtype)\n        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}\n\n        self.attrs = {\n            'strides': self.strides,\n            'paddings': self.paddings,\n            'ksize': self.ksize,\n            'pooling_type': self.pool_type,\n            'global_pooling': self.global_pool,\n            'use_cudnn': self.use_cudnn,\n            'use_mkldnn': self.use_mkldnn,\n            'ceil_mode': self.ceil_mode,\n            'data_format': 'AnyLayout'  # TODO(dzhwinter) : should be fix latter\n        }\n\n        self.outputs = {'Out': output}\n\n    def testcudnn(self):\n        return core.is_compiled_with_cuda() and self.use_cudnn\n\n    def test_check_output(self):\n        if self.testcudnn():\n            place = core.CUDAPlace(0)\n            self.check_output_with_place(place, atol=1e-5)\n        else:\n            self.check_output()\n\n    def test_check_grad(self):\n        if self.dtype == np.float16:\n            return\n        if self.testcudnn() and self.pool_type != \"max\":\n            place = core.CUDAPlace(0)\n            self.check_grad_with_place(\n                place, set(['X']), 'Out', max_relative_error=0.07)\n        elif self.pool_type != \"max\":\n            self.check_grad(set(['X']), 'Out', max_relative_error=0.07)\n\n    def init_test_case(self):\n        self.shape = [2, 3, 5, 5]\n        self.ksize = [3, 3]\n        self.strides = [1, 1]\n        self.paddings = [0, 0]\n\n    def init_kernel_type(self):\n        pass\n\n    def init_pool_type(self):\n        self.pool_type = \"avg\"\n        self.pool2D_forward_naive = avg_pool2D_forward_naive\n\n    def init_global_pool(self):\n        self.global_pool = True\n\n    def init_ceil_mode(self):\n        self.ceil_mode = False\n\n\nclass TestCase1(TestPool2d_Op):\n    def init_test_case(self):\n        self.shape = [2, 3, 7, 7]\n        self.ksize = [3, 3]\n        self.strides = [1, 1]\n        self.paddings = [0, 0]\n\n    def init_pool_type(self):\n        self.pool_type = \"avg\"\n        self.pool2D_forward_naive = avg_pool2D_forward_naive\n\n    def init_global_pool(self):\n        self.global_pool = False\n\n\nclass TestCase2(TestPool2d_Op):\n    def init_test_case(self):\n        self.shape = [2, 3, 7, 7]\n        self.ksize = [3, 3]\n        self.strides = [1, 1]\n        self.paddings = [1, 1]\n\n    def init_pool_type(self):\n        self.pool_type = \"avg\"\n        self.pool2D_forward_naive = avg_pool2D_forward_naive\n\n    def init_global_pool(self):\n        self.global_pool = False\n\n\nclass TestCase3(TestPool2d_Op):\n    def init_pool_type(self):\n        self.pool_type = \"max\"\n        self.pool2D_forward_naive = max_pool2D_forward_naive\n\n\nclass TestCase4(TestCase1):\n    def init_pool_type(self):\n        self.pool_type = \"max\"\n        self.pool2D_forward_naive = max_pool2D_forward_naive\n\n\nclass TestCase5(TestCase2):\n    def init_pool_type(self):\n        self.pool_type = \"max\"\n        self.pool2D_forward_naive = max_pool2D_forward_naive\n\n\n#--------------------test pool2d--------------------\nclass TestCUDNNCase1(TestPool2d_Op):\n    def init_kernel_type(self):\n        self.use_cudnn = True\n\n\nclass TestFP16CUDNNCase1(TestPool2d_Op):\n    def init_kernel_type(self):\n        self.use_cudnn = True\n        self.dtype = np.float16\n\n    def test_check_output(self):\n        if core.is_compiled_with_cuda():\n            place = core.CUDAPlace(0)\n            if core.is_float16_supported(place):\n                self.check_output_with_place(place, atol=1e-3)\n\n\nclass TestCUDNNCase2(TestCase1):\n    def init_kernel_type(self):\n        self.use_cudnn = True\n\n\nclass TestFP16CUDNNCase2(TestCase1):\n    def init_kernel_type(self):\n        self.use_cudnn = True\n        self.dtype = np.float16\n\n    def test_check_output(self):\n        if core.is_compiled_with_cuda():\n            place = core.CUDAPlace(0)\n            if core.is_float16_supported(place):\n                self.check_output_with_place(place, atol=1e-3)\n\n\nclass TestCUDNNCase3(TestCase2):\n    def init_kernel_type(self):\n        self.use_cudnn = True\n\n\nclass TestFP16CUDNNCase3(TestCase2):\n    def init_kernel_type(self):\n        self.use_cudnn = True\n        self.dtype = np.float16\n\n    def test_check_output(self):\n        if core.is_compiled_with_cuda():\n            place = core.CUDAPlace(0)\n            if core.is_float16_supported(place):\n                self.check_output_with_place(place, atol=1e-3)\n\n\nclass TestCUDNNCase4(TestCase3):\n    def init_kernel_type(self):\n        self.use_cudnn = True\n\n\nclass TestFP16CUDNNCase4(TestCase3):\n    def init_kernel_type(self):\n        self.use_cudnn = True\n        self.dtype = np.float16\n\n    def test_check_output(self):\n        if core.is_compiled_with_cuda():\n            place = core.CUDAPlace(0)\n            if core.is_float16_supported(place):\n                self.check_output_with_place(place, atol=1e-3)\n\n\nclass TestCUDNNCase5(TestCase4):\n    def init_kernel_type(self):\n        self.use_cudnn = True\n\n\nclass TestFP16CUDNNCase5(TestCase4):\n    def init_kernel_type(self):\n        self.use_cudnn = True\n        self.dtype = np.float16\n\n    def test_check_output(self):\n        if core.is_compiled_with_cuda():\n            place = core.CUDAPlace(0)\n            if core.is_float16_supported(place):\n                self.check_output_with_place(place, atol=1e-3)\n\n\nclass TestCUDNNCase6(TestCase5):\n    def init_kernel_type(self):\n        self.use_cudnn = True\n\n\nclass TestFP16CUDNNCase6(TestCase5):\n    def init_kernel_type(self):\n        self.use_cudnn = True\n        self.dtype = np.float16\n\n    def test_check_output(self):\n        if core.is_compiled_with_cuda():\n            place = core.CUDAPlace(0)\n            if core.is_float16_supported(place):\n                self.check_output_with_place(place, atol=1e-3)\n\n\nclass TestCeilModeCase1(TestCUDNNCase1):\n    def init_ceil_mode(self):\n        self.ceil_mode = True\n\n\nclass TestCeilModeCase2(TestCUDNNCase2):\n    def init_ceil_mode(self):\n        self.ceil_mode = True\n\n\nclass TestCeilModeCase3(TestCase1):\n    def init_ceil_mode(self):\n        self.ceil_mode = True\n\n\nclass TestCeilModeCase4(TestCase2):\n    def init_ceil_mode(self):\n        self.ceil_mode = True\n\n\nif __name__ == '__main__':\n    unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport core\nimport numpy\nimport six.moves as six\nimport multiprocessing\n\nfrom framework import Variable, default_main_program\n\n__all__ = ['DataFeeder']\n\n\nclass DataToLoDTensorConverter(object):\n    def __init__(self, place, lod_level, shape, dtype):\n        self.place = place\n        self.lod_level = lod_level\n        self.shape = shape\n        if dtype == core.VarDesc.VarType.FP32:\n            self.dtype = 'float32'\n        elif dtype == core.VarDesc.VarType.INT64:\n            self.dtype = 'int64'\n        elif dtype == core.VarDesc.VarType.FP64:\n            self.dtype = 'float64'\n        elif dtype == core.VarDesc.VarType.INT32:\n            self.dtype = 'int32'\n        elif dtype == core.VarDesc.VarType.UINT8:\n            self.dtype = 'uint8'\n        else:\n            raise ValueError(\"dtype must be any of [int32, float32, int64, \"\n                             \"float64, uint8]\")\n\n        self.data = []\n        self.lod = []\n\n        for i in six.range(lod_level):\n            self.lod.append([0])\n\n    def feed(self, data):\n        self._feed_impl_(data, self.lod, self.lod_level)\n\n    def _feed_impl_(self, data, lod, lod_level):\n        if lod_level == 0:\n            self.data.append(data)\n        else:\n            cur_lod_len = len(data)\n            lod[0].append(lod[0][-1] + cur_lod_len)\n            for each_data in data:\n                self._feed_impl_(each_data, lod[1:], lod_level - 1)\n\n    def done(self):\n        arr = numpy.array(self.data, dtype=self.dtype).reshape(self.shape)\n        t = core.LoDTensor()\n        t.set(arr, self.place)\n        if self.lod_level > 0:\n            t.set_lod(self.lod)\n        return t\n\n\nclass DataFeeder(object):\n    def __init__(self, feed_list, place, program=None):\n        self.feed_dtypes = []\n        self.feed_names = []\n        self.feed_shapes = []\n        self.feed_lod_level = []\n        if program is None:\n            program = default_main_program()\n        for each_var in feed_list:\n            if isinstance(each_var, basestring):\n                each_var = program.block(0).var(each_var)\n            if not isinstance(each_var, Variable):\n                raise TypeError(\"Feed list should contain a list of variable\")\n            self.feed_dtypes.append(each_var.dtype)\n            self.feed_names.append(each_var.name)\n            shape = each_var.shape\n            batch_size_dim = -1\n            for i, s in enumerate(shape):\n                if s < 0:\n                    batch_size_dim = i\n                    break\n            if batch_size_dim == -1:\n                raise ValueError(\"Variable {0} must has a batch size dimension\",\n                                 each_var.name)\n            self.feed_lod_level.append(each_var.lod_level)\n            self.feed_shapes.append(shape)\n\n        self.place = place\n\n    def feed(self, iterable):\n        converter = []\n        for lod_level, shape, dtype in six.zip(\n                self.feed_lod_level, self.feed_shapes, self.feed_dtypes):\n            converter.append(\n                DataToLoDTensorConverter(\n                    place=self.place,\n                    lod_level=lod_level,\n                    shape=shape,\n                    dtype=dtype))\n\n        for each_sample in iterable:\n            assert len(each_sample) == len(converter), (\n                \"The number of fields in data (%s) does not match \" +\n                \"len(feed_list) (%s)\") % (len(each_sample), len(converter))\n            for each_converter, each_slot in six.zip(converter, each_sample):\n                each_converter.feed(each_slot)\n        ret_dict = {}\n        for each_name, each_converter in six.zip(self.feed_names, converter):\n            ret_dict[each_name] = each_converter.done()\n        return ret_dict\n\n    def feed_parallel(self, iterable, num_places=None):\n        if isinstance(self.place, core.CUDAPlace):\n            places = [\n                core.CUDAPlace(i)\n                for i in six.xrange(self._get_number_of_places_(num_places))\n            ]\n        else:\n            places = [\n                core.CPUPlace()\n                for _ in six.xrange(self._get_number_of_places_(num_places))\n            ]\n\n        if len(iterable) != len(places):\n            raise ValueError(\"feed_parallel takes multiple mini-batches. Each \"\n                             \"mini-batch will be feed on each device. The \"\n                             \"number of devices and number of mini-batches \"\n                             \"must be same.\")\n\n        place = self.place\n        for p, batch in six.zip(places, iterable):\n            self.place = p\n            yield self.feed(batch)\n        self.place = place\n\n    def _get_number_of_places_(self, num_places):\n        if num_places is not None:\n            return int(num_places)\n        elif isinstance(self.place, core.CUDAPlace):\n            return core.get_cuda_device_count()\n        else:\n            return multiprocessing.cpu_count()\n\n    def decorate_reader(self,\n                        reader,\n                        multi_devices,\n                        num_places=None,\n                        drop_last=True):\n        def __reader_creator__():\n            if not multi_devices:\n                for item in reader():\n                    yield self.feed(item)\n            else:\n                num = self._get_number_of_places_(num_places)\n                item = []\n                for batch in reader():\n                    item.append(batch)\n                    if len(item) == num:\n                        yield list(self.feed_parallel(item, num))\n                        item = []\n                if not drop_last and len(item) != 0:\n                    raise ValueError(\n                        \"The data batch which cannot fit for devices will be \"\n                        \"dropped is not implementation. Other strategies are \"\n                        \"not implemented\")\n\n        return __reader_creator__\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nimport paddle.fluid.core as core\nfrom paddle.fluid.op import Operator\n\n\nclass TestBeamSearchDecodeOp(unittest.TestCase):\n    def setUp(self):\n        self.scope = core.Scope()\n        self.place = core.CPUPlace()\n\n    def append_lod_tensor(self, tensor_array, lod, data):\n        lod_tensor = core.LoDTensor()\n        lod_tensor.set_lod(lod)\n        lod_tensor.set(data, self.place)\n        tensor_array.append(lod_tensor)\n\n    def test_get_set(self):\n        ids = self.scope.var(\"ids\").get_lod_tensor_array()\n        self.append_lod_tensor(\n            ids, [[0, 3, 6], [0, 1, 2, 3, 4, 5, 6]],\n            np.array(\n                [1, 2, 3, 4, 5, 6], dtype=\"int64\"))\n        self.append_lod_tensor(\n            ids, [[0, 3, 6], [0, 1, 1, 3, 5, 5, 6]],\n            np.array(\n                [0, 1, 2, 3, 4, 5], dtype=\"int64\"))\n        self.append_lod_tensor(\n            ids, [[0, 3, 6], [0, 0, 1, 2, 3, 4, 5]],\n            np.array(\n                [0, 1, 2, 3, 4], dtype=\"int64\"))\n\n        scores = self.scope.var(\"scores\").get_lod_tensor_array()\n        self.append_lod_tensor(\n            scores, [[0, 3, 6], [0, 1, 2, 3, 4, 5, 6]],\n            np.array(\n                [1, 2, 3, 4, 5, 6], dtype=\"float64\"))\n        self.append_lod_tensor(\n            scores, [[0, 3, 6], [0, 1, 1, 3, 5, 5, 6]],\n            np.array(\n                [0, 1, 2, 3, 4, 5], dtype=\"float64\"))\n        self.append_lod_tensor(\n            scores, [[0, 3, 6], [0, 0, 1, 2, 3, 4, 5]],\n            np.array(\n                [0, 1, 2, 3, 4], dtype=\"float64\"))\n\n        sentence_ids = self.scope.var(\"sentence_ids\").get_tensor()\n        sentence_scores = self.scope.var(\"sentence_scores\").get_tensor()\n\n        beam_search_decode_op = Operator(\n            \"beam_search_decode\",\n            # inputs\n            Ids=\"ids\",\n            Scores=\"scores\",\n            # outputs\n            SentenceIds=\"sentence_ids\",\n            SentenceScores=\"sentence_scores\")\n\n        beam_search_decode_op.run(self.scope, self.place)\n\n        expected_lod = [[0, 4, 8], [0, 1, 3, 6, 9, 10, 13, 16, 19]]\n        self.assertEqual(sentence_ids.lod(), expected_lod)\n        self.assertEqual(sentence_scores.lod(), expected_lod)\n\n        expected_data = np.array(\n            [2, 1, 0, 3, 1, 0, 3, 2, 1, 5, 4, 3, 2, 4, 4, 3, 6, 5, 4], \"int64\")\n        self.assertTrue(np.array_equal(np.array(sentence_ids), expected_data))\n        self.assertTrue(\n            np.array_equal(np.array(sentence_scores), expected_data))\n\n\nclass TestBeamSearchDecodeOpGPU(TestBeamSearchDecodeOp):\n    def setUp(self):\n        self.scope = core.Scope()\n        self.place = core.CUDAPlace(0)\n\n\nif __name__ == '__main__':\n    unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport paddle.fluid as fluid\nimport paddle.fluid.profiler as profiler\nimport numpy\n\n\nclass BaseParallelForTest(unittest.TestCase):\n    def run_test(self, callback, feed, fetch):\n        \"\"\"\n        Run the unittest for parallel.for\n        Args:\n            callback(callable): A callable function returns a generator. There \n                are two yields in the generator function. The first yield \n                returns the data layers, and the second yield returns the loss. \n                The modified data variables will be sent back during the first \n                yield.\n\n            feed(dict): The executor feeding dictionary.\n            fetch(list|basestr): The fetch name lists. \n\n        Returns:\n            None\n\n        Raises:\n            AssertionError when the computation of cpu, parallel.for in cpu, \n                gpu, parallel.for in gpu are different.\n\n        \"\"\"\n        cpu = fluid.CPUPlace()\n        result_cpu = self._run_test_impl_(\n            callback=callback,\n            feed=feed,\n            fetch=fetch,\n            place=cpu,\n            use_parallel=False)\n        result_cpu_parallel = self._run_test_impl_(\n            callback=callback,\n            feed=feed,\n            fetch=fetch,\n            place=cpu,\n            use_parallel=True)\n        if fluid.core.is_compiled_with_cuda():\n            gpu = fluid.CUDAPlace(0)\n            result_gpu = self._run_test_impl_(\n                callback=callback,\n                feed=feed,\n                fetch=fetch,\n                place=gpu,\n                use_parallel=False,\n                use_gpu=True)\n            result_gpu_parallel = self._run_test_impl_(\n                callback=callback,\n                feed=feed,\n                fetch=fetch,\n                place=gpu,\n                use_parallel=True,\n                use_gpu=True)\n            result_gpu_nccl = self._run_test_impl_(\n                callback=callback,\n                feed=feed,\n                fetch=fetch,\n                place=gpu,\n                use_parallel=True,\n                use_nccl=True,\n                use_gpu=True)\n            self._assert_same_(fetch, result_cpu, result_cpu_parallel,\n                               result_gpu, result_gpu_parallel, result_gpu_nccl)\n        else:\n            self._assert_same_(fetch, result_cpu, result_cpu_parallel)\n\n    def _run_test_impl_(self,\n                        callback,\n                        feed,\n                        fetch,\n                        place,\n                        use_parallel=False,\n                        use_nccl=False,\n                        use_gpu=False):\n        \"\"\"\n        Run a single test, returns the fetch values\n        Args:\n            place(Place): the computation place. \n            use_parallel(bool): Whether use parallel.for or not. \n\n        Returns:\n            Fetched numpy arrays.\n\n        \"\"\"\n        if isinstance(fetch, basestring):\n            fetch = [fetch]\n        main = fluid.Program()\n        startup = fluid.Program()\n        # Fix seed\n        main.random_seed = 10\n        startup.random_seed = 10\n\n        with fluid.program_guard(main, startup):\n            generator = callback()\n            # Automatically insert parallel do if use_parallel = True\n            if use_parallel:\n                places = fluid.layers.get_places()\n                pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl)\n                data = next(generator)\n\n                if isinstance(data, fluid.Variable):\n                    data = [data]\n\n                with pd.do():\n                    ins = map(pd.read_input, data)\n                    if len(ins) == 1:\n                        ins = ins[0]\n                    loss = generator.send(ins)  # patch input\n                    pd.write_output(loss)\n\n                loss = pd()\n            else:\n                data = next(generator)\n                loss = generator.send(data)\n            self.assertIsNotNone(loss)\n            avg_loss = fluid.layers.mean(loss)\n            fluid.backward.append_backward(loss=avg_loss)\n\n        exe = fluid.Executor(place)\n        exe.run(startup)\n        if use_gpu:\n            profile_type = 'GPU'\n        else:\n            profile_type = 'CPU'\n        with profiler.profiler(profile_type, 'total', '/tmp/profiler'):\n            return exe.run(main, feed=feed, fetch_list=fetch)\n\n    def _assert_same_(self, fetch, *args):\n        \"\"\"\n        Assert the return values of `run_test` are same.\n        Args:\n            fetch: Fetch list. Used for print error message\n            *args: The fetch result lists of each situations.\n\n        Returns:\n            None\n        \n        Raises:\n            AssertionError\n\n        \"\"\"\n\n        def _impl_(a, b, fetch_id, item_id):\n            item_str = [\n                'CPU', 'ParallelCPU', 'GPU', 'ParallelGPU', 'ParallelGPUNCCL'\n            ]\n            flag = numpy.allclose(a, b, rtol=0.1, atol=1e-3)\n            self.assertTrue(flag,\n                            \"The {0} are different in {1}, {2} vs {3}\".format(\n                                fetch[fetch_id], item_str[item_id], a, b))\n\n        for i, items in enumerate(zip(*args)):\n            self.assertGreater(len(items), 0)\n            for j in range(1, len(items)):\n                _impl_(items[0], items[j], fetch_id=i, item_id=j)\n\n\nclass ParallelOpTest(BaseParallelForTest):\n    @staticmethod\n    def __network__():\n        x = fluid.layers.data(shape=[784], dtype='float32', name='img')\n        x = yield x\n        hidden = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')\n        hidden = fluid.layers.batch_norm(input=hidden)\n        loss = fluid.layers.mean(hidden)\n        yield loss\n\n    def test_simple_fc(self):\n        self.run_test(\n            callback=self.__network__,\n            feed={\n                'img': numpy.random.random(size=(51, 784)).astype('float32')\n            },\n            fetch=['fc1.w@GRAD'])\n\n    def test_fc_with_tiny_data(self):\n        self.run_test(\n            callback=self.__network__,\n            feed={'img': numpy.random.random(size=(1, 784)).astype('float32')},\n            fetch=['fc1.w@GRAD'])\n\n\nclass ParallelOpTestMultipleInput(BaseParallelForTest):\n    @staticmethod\n    def __network__():\n        x = fluid.layers.data(\n            shape=[784], dtype='float32', name='img1', stop_gradient=False)\n        y = fluid.layers.data(\n            shape=[784], dtype='float32', name='img2', stop_gradient=False)\n        yield [x, y]\n        x = x + y\n        hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')\n        hidden2 = fluid.layers.fc(input=hidden1, size=200, param_attr='fc2.w')\n        hidden3 = fluid.layers.fc(input=hidden2, size=200, param_attr='fc3.w')\n        loss = fluid.layers.mean(hidden3)\n        yield loss\n\n    def test_simple_fc(self):\n        self.run_test(\n            callback=self.__network__,\n            feed={\n                'img1': numpy.random.random(size=(51, 784)).astype('float32'),\n                'img2': numpy.random.random(size=(51, 784)).astype('float32')\n            },\n            fetch=['fc1.w@GRAD', 'fc2.w@GRAD', 'fc3.w@GRAD'])\n\n\nif __name__ == '__main__':\n    unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import print_function\nimport argparse\nimport paddle.fluid as fluid\nimport paddle\nimport sys\nimport numpy\nimport unittest\nimport math\nimport sys\nimport os\n\nBATCH_SIZE = 64\n\n\ndef inference_program():\n    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')\n\n    conv_pool_1 = fluid.nets.simple_img_conv_pool(\n        input=img,\n        filter_size=5,\n        num_filters=20,\n        pool_size=2,\n        pool_stride=2,\n        act=\"relu\")\n    conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)\n    conv_pool_2 = fluid.nets.simple_img_conv_pool(\n        input=conv_pool_1,\n        filter_size=5,\n        num_filters=50,\n        pool_size=2,\n        pool_stride=2,\n        act=\"relu\")\n    prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax')\n    return prediction\n\n\ndef train_program():\n    label = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n    predict = inference_program()\n    cost = fluid.layers.cross_entropy(input=predict, label=label)\n    avg_cost = fluid.layers.mean(cost)\n    acc = fluid.layers.accuracy(input=predict, label=label)\n    return [avg_cost, acc]\n\n\ndef train(use_cuda, train_program, params_dirname):\n    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n    optimizer = fluid.optimizer.Adam(learning_rate=0.001)\n\n    trainer = fluid.Trainer(\n        train_func=train_program,\n        place=place,\n        optimizer=optimizer,\n        parallel=True)\n\n    def event_handler(event):\n        if isinstance(event, fluid.EndEpochEvent):\n            test_reader = paddle.batch(\n                paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)\n            avg_cost, acc = trainer.test(\n                reader=test_reader, feed_order=['img', 'label'])\n\n            print(\"avg_cost: %s\" % avg_cost)\n            print(\"acc     : %s\" % acc)\n\n            if acc > 0.2:  # Smaller value to increase CI speed\n                trainer.save_params(params_dirname)\n            else:\n                print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(\n                    event.epoch + 1, avg_cost, acc))\n                if math.isnan(avg_cost):\n                    sys.exit(\"got NaN loss, training failed.\")\n        elif isinstance(event, fluid.EndStepEvent):\n            print(\"Step {0}, Epoch {1} Metrics {2}\".format(\n                event.step, event.epoch, map(numpy.array, event.metrics)))\n\n    train_reader = paddle.batch(\n        paddle.reader.shuffle(\n            paddle.dataset.mnist.train(), buf_size=500),\n        batch_size=BATCH_SIZE)\n\n    trainer.train(\n        num_epochs=1,\n        event_handler=event_handler,\n        reader=train_reader,\n        feed_order=['img', 'label'])\n\n\ndef infer(use_cuda, inference_program, params_dirname=None):\n    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n\n    inferencer = fluid.Inferencer(\n        infer_func=inference_program, param_path=params_dirname, place=place)\n\n    batch_size = 1\n    tensor_img = numpy.random.uniform(-1.0, 1.0,\n                                      [batch_size, 1, 28, 28]).astype(\"float32\")\n\n    results = inferencer.infer({'img': tensor_img})\n\n    print(\"infer results: \", results[0])\n\n\ndef main(use_cuda):\n    params_dirname = \"recognize_digits_conv.inference.model\"\n\n    # call train() with is_local argument to run distributed train\n    train(\n        use_cuda=use_cuda,\n        train_program=train_program,\n        params_dirname=params_dirname)\n    infer(\n        use_cuda=use_cuda,\n        inference_program=inference_program,\n        params_dirname=params_dirname)\n\n\nif __name__ == '__main__':\n    # for use_cuda in (False, True):\n    main(use_cuda=True)\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\n\n\nclass TestMultiplexOp(OpTest):\n def setUp(self):\n self.op_type = \"multiplex\"\n rows = 4\n index = np.arange(0, rows).astype('int32')\n np.random.shuffle(index)\n index = np.reshape(index, (rows, 1))\n ins1 = np.random.random((rows, 10)).astype(\"float32\")\n ins2 = np.random.random((rows, 10)).astype(\"float32\")\n ins3 = np.random.random((rows, 10)).astype(\"float32\")\n ins4 = np.random.random((rows, 10)).astype(\"float32\")\n self.inputs = {\n 'Ids': index,\n 'X': [('x1', ins1), ('x2', ins2), ('x3', ins3), ('x4', ins4)]\n }\n # multiplex output\n output = np.zeros_like(ins1)\n for i in range(0, rows):\n k = index[i][0]\n output[i] = self.inputs['X'][k][1][i]\n self.outputs = {'Out': output}\n\n def test_check_output(self):\n self.check_output()\n\n def test_check_grad(self):\n self.check_grad(['x1', 'x2', 'x3', 'x4'], 'Out')\n\n def test_check_grad_ignore_x1(self):\n self.check_grad(['x2', 'x3', 'x4'], 'Out', no_grad_set=set('x1'))\n\n def test_check_grad_ignore_x1_x2(self):\n self.check_grad(['x3', 'x4'], 'Out', no_grad_set=set(['x1', 'x2']))\n\n def test_check_grad_ignore_x3(self):\n self.check_grad(['x1', 'x2', 'x4'], 'Out', no_grad_set=set('x3'))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\n\n\nclass TestSignOp(OpTest):\n def setUp(self):\n self.op_type = \"sign\"\n self.inputs = {\n 'X': np.random.uniform(-10, 10, (10, 10)).astype(\"float32\")\n }\n self.outputs = {'Out': np.sign(self.inputs['X'])}\n\n def test_check_output(self):\n self.check_output()\n\n def test_check_grad(self):\n self.check_grad(['X'], 'Out')\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nimport paddle.fluid.core as core\nfrom op_test import OpTest\n\n\nclass TestRandomCropOp(OpTest):\n def setUp(self):\n to_crop = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]] *\n 5).astype(\"float32\")\n self.possible_res = [\n np.array([[1, 2, 3], [5, 6, 7]]), np.array([[2, 3, 4], [6, 7, 8]]),\n np.array([[5, 6, 7], [9, 10, 11]]),\n np.array([[6, 7, 8], [10, 11, 12]])\n ]\n self.op_type = \"random_crop\"\n self.inputs = {'X': to_crop, 'Seed': np.array([10])}\n self.outputs = {'Out': np.array([]), 'SeedOut': np.array([])}\n self.attrs = {'shape': [2, 3]}\n\n def test_check_output(self):\n self.check_output_customized(self.verify_output)\n\n def verify_output(self, outs):\n out = np.array(outs[1])\n for ins in out[:]:\n is_equal = [(ins == res).all() for res in self.possible_res]\n self.assertIn(True, is_equal)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.random.random",
"numpy.argwhere",
"numpy.ones",
"numpy.argmax",
"numpy.zeros"
],
[
"numpy.random.random",
"numpy.min",
"numpy.max",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.random.random",
"numpy.allclose"
],
[
"numpy.random.uniform"
],
[
"numpy.random.random",
"numpy.reshape",
"numpy.arange",
"numpy.random.shuffle",
"numpy.zeros_like"
],
[
"numpy.sign",
"numpy.random.uniform"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Felix-neko/catalyst | [
"df80986f1c12ef6a3776637453a0c04aaef0068c"
] | [
"catalyst/rl/scripts/run_samplers.py"
] | [
"#!/usr/bin/env python\n\nimport os\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nos.environ[\"MKL_NUM_THREADS\"] = \"1\"\n\nimport copy # noqa E402\nimport time # noqa E402\nimport atexit # noqa E402\nimport argparse # noqa E402\nimport multiprocessing as mp # noqa E402\n\nimport torch # noqa E402\ntorch.set_num_threads(1)\n\nfrom catalyst.rl.core import Sampler, ValidSampler, \\\n ExplorationHandler # noqa E402\nfrom catalyst.rl.registry import \\\n OFFPOLICY_ALGORITHMS, ONPOLICY_ALGORITHMS, \\\n ENVIRONMENTS, DATABASES # noqa E402\nfrom catalyst.rl.scripts.misc import OFFPOLICY_ALGORITHMS_NAMES, \\\n ONPOLICY_ALGORITHMS_NAMES # noqa E402\nfrom catalyst.utils.config import parse_args_uargs # noqa E402\nfrom catalyst.utils import set_global_seed, boolean_flag # noqa E402\nfrom catalyst.utils.scripts import import_module # noqa E402\n\n\ndef build_args(parser):\n parser.add_argument(\n \"--config\",\n \"--configs\",\n \"-C\",\n nargs=\"+\",\n help=\"path to config/configs\",\n metavar=\"CONFIG_PATH\",\n dest=\"configs\",\n required=True\n )\n parser.add_argument(\"--expdir\", type=str, default=None)\n parser.add_argument(\"--logdir\", type=str, default=None)\n parser.add_argument(\"--resume\", type=str, default=None)\n parser.add_argument(\"--seed\", type=int, default=42)\n\n parser.add_argument(\"--train\", type=int, default=None)\n parser.add_argument(\"--valid\", type=int, default=None)\n parser.add_argument(\"--infer\", type=int, default=None)\n parser.add_argument(\"--vis\", type=int, default=None)\n\n boolean_flag(parser, \"check\", default=False)\n boolean_flag(parser, \"db\", default=True)\n\n parser.add_argument(\"--run-delay\", type=int, default=1)\n boolean_flag(parser, \"daemon\", default=True)\n parser.add_argument(\"--sampler-id\", type=int, default=0)\n\n return parser\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n build_args(parser)\n args, unknown_args = parser.parse_known_args()\n return args, unknown_args\n\n\ndef run_sampler(\n *,\n config,\n logdir,\n algorithm_fn,\n environment_fn,\n visualize,\n mode,\n seed=42,\n id=None,\n resume=None,\n db=True,\n exploration_power=1.0,\n sync_epoch=False\n):\n config_ = copy.deepcopy(config)\n id = 0 if id is None else id\n seed = seed + id\n set_global_seed(seed)\n\n db_server = DATABASES.get_from_params(\n **config.get(\"db\", {}), sync_epoch=sync_epoch\n ) if db else None\n\n env = environment_fn(\n **config_[\"environment\"],\n visualize=visualize,\n mode=mode,\n sampler_id=id,\n )\n agent = algorithm_fn.prepare_for_sampler(env_spec=env, config=config_)\n\n exploration_params = config_[\"sampler\"].pop(\"exploration_params\", None)\n exploration_handler = ExplorationHandler(env=env, *exploration_params) \\\n if exploration_params is not None \\\n else None\n if exploration_handler is not None:\n exploration_handler.set_power(exploration_power)\n\n seeds = dict(\n (k, config_[\"sampler\"].pop(f\"{k}_seeds\", None))\n for k in [\"train\", \"valid\", \"infer\"]\n )\n seeds = seeds[mode]\n\n if algorithm_fn in OFFPOLICY_ALGORITHMS.values():\n weights_sync_mode = \"critic\" if env.discrete_actions else \"actor\"\n elif algorithm_fn in ONPOLICY_ALGORITHMS.values():\n weights_sync_mode = \"actor\"\n else:\n # @TODO: add registry for algorithms, trainers, samplers\n raise NotImplementedError()\n\n if mode in [\"valid\"]:\n sampler_fn = ValidSampler\n else:\n sampler_fn = Sampler\n\n sampler = sampler_fn(\n agent=agent,\n env=env,\n db_server=db_server,\n exploration_handler=exploration_handler,\n logdir=logdir,\n id=id,\n 
mode=mode,\n weights_sync_mode=weights_sync_mode,\n seeds=seeds,\n **config_[\"sampler\"],\n )\n\n if resume is not None:\n sampler.load_checkpoint(filepath=resume)\n\n sampler.run()\n\n\ndef main(args, unknown_args):\n args, config = parse_args_uargs(args, unknown_args)\n\n args.vis = args.vis or 0\n args.infer = args.infer or 0\n args.valid = args.valid or 0\n args.train = args.train or 0\n\n if args.expdir is not None:\n module = import_module(expdir=args.expdir) # noqa: F841\n\n environment_name = config[\"environment\"].pop(\"environment\")\n environment_fn = ENVIRONMENTS.get(environment_name)\n\n algorithm_name = config[\"algorithm\"].pop(\"algorithm\")\n\n if algorithm_name in OFFPOLICY_ALGORITHMS_NAMES:\n ALGORITHMS = OFFPOLICY_ALGORITHMS\n sync_epoch = False\n elif algorithm_name in ONPOLICY_ALGORITHMS_NAMES:\n ALGORITHMS = ONPOLICY_ALGORITHMS\n sync_epoch = True\n else:\n raise NotImplementedError()\n\n algorithm_fn = ALGORITHMS.get(algorithm_name)\n\n processes = []\n sampler_id = args.sampler_id\n\n def on_exit():\n for p in processes:\n p.terminate()\n\n atexit.register(on_exit)\n\n params = dict(\n seed=args.seed,\n logdir=args.logdir,\n algorithm_fn=algorithm_fn,\n environment_fn=environment_fn,\n config=config,\n resume=args.resume,\n db=args.db,\n sync_epoch=sync_epoch\n )\n\n if args.check:\n mode = \"train\"\n mode = \"valid\" if (args.valid is not None and args.valid > 0) else mode\n mode = \"infer\" if (args.infer is not None and args.infer > 0) else mode\n params_ = dict(\n visualize=(args.vis is not None and args.vis > 0),\n mode=mode,\n id=sampler_id\n )\n run_sampler(**params, **params_)\n\n for i in range(args.vis):\n params_ = dict(\n visualize=True, mode=\"infer\", id=sampler_id, exploration_power=0.0\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for i in range(args.infer):\n params_ = dict(\n visualize=False,\n mode=\"infer\",\n id=sampler_id,\n exploration_power=0.0\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for i in range(args.valid):\n params_ = dict(\n visualize=False,\n mode=\"valid\",\n id=sampler_id,\n exploration_power=0.0\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for i in range(1, args.train + 1):\n exploration_power = i / args.train\n params_ = dict(\n visualize=False,\n mode=\"train\",\n id=sampler_id,\n exploration_power=exploration_power\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for p in processes:\n p.join()\n\n\nif __name__ == \"__main__\":\n args, unknown_args = parse_args()\n main(args, unknown_args)\n"
] | [
[
"torch.set_num_threads"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jahau/addons | [
"11b842781b0f022830f35f2e6ee1cc93c80abe50",
"11b842781b0f022830f35f2e6ee1cc93c80abe50",
"11b842781b0f022830f35f2e6ee1cc93c80abe50",
"11b842781b0f022830f35f2e6ee1cc93c80abe50",
"11b842781b0f022830f35f2e6ee1cc93c80abe50"
] | [
"tensorflow_addons/image/interpolate_spline.py",
"tensorflow_addons/optimizers/stochastic_weight_averaging.py",
"tensorflow_addons/seq2seq/loss_test.py",
"tensorflow_addons/layers/poincare_test.py",
"tensorflow_addons/losses/quantiles.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Polyharmonic spline interpolation.\"\"\"\n\nimport tensorflow as tf\n\nEPSILON = 0.0000000001\n\n\ndef _cross_squared_distance_matrix(x, y):\n \"\"\"Pairwise squared distance between two (batch) matrices' rows (2nd dim).\n\n Computes the pairwise distances between rows of x and rows of y\n Args:\n x: [batch_size, n, d] float `Tensor`\n y: [batch_size, m, d] float `Tensor`\n\n Returns:\n squared_dists: [batch_size, n, m] float `Tensor`, where\n squared_dists[b,i,j] = ||x[b,i,:] - y[b,j,:]||^2\n \"\"\"\n x_norm_squared = tf.reduce_sum(tf.square(x), 2)\n y_norm_squared = tf.reduce_sum(tf.square(y), 2)\n\n # Expand so that we can broadcast.\n x_norm_squared_tile = tf.expand_dims(x_norm_squared, 2)\n y_norm_squared_tile = tf.expand_dims(y_norm_squared, 1)\n\n x_y_transpose = tf.matmul(x, y, adjoint_b=True)\n\n # squared_dists[b,i,j] = ||x_bi - y_bj||^2 =\n # x_bi'x_bi- 2x_bi'x_bj + x_bj'x_bj\n squared_dists = (\n x_norm_squared_tile - 2 * x_y_transpose + y_norm_squared_tile)\n\n return squared_dists\n\n\ndef _pairwise_squared_distance_matrix(x):\n \"\"\"Pairwise squared distance among a (batch) matrix's rows (2nd dim).\n\n This saves a bit of computation vs. 
using\n _cross_squared_distance_matrix(x,x)\n\n Args:\n x: `[batch_size, n, d]` float `Tensor`\n\n Returns:\n squared_dists: `[batch_size, n, n]` float `Tensor`, where\n squared_dists[b,i,j] = ||x[b,i,:] - x[b,j,:]||^2\n \"\"\"\n\n x_x_transpose = tf.matmul(x, x, adjoint_b=True)\n x_norm_squared = tf.linalg.diag_part(x_x_transpose)\n x_norm_squared_tile = tf.expand_dims(x_norm_squared, 2)\n\n # squared_dists[b,i,j] = ||x_bi - x_bj||^2 =\n # = x_bi'x_bi- 2x_bi'x_bj + x_bj'x_bj\n squared_dists = x_norm_squared_tile - 2 * x_x_transpose + tf.transpose(\n x_norm_squared_tile, [0, 2, 1])\n\n return squared_dists\n\n\ndef _solve_interpolation(train_points, train_values, order,\n regularization_weight):\n \"\"\"Solve for interpolation coefficients.\n\n Computes the coefficients of the polyharmonic interpolant for the\n 'training' data defined by (train_points, train_values) using the kernel\n phi.\n\n Args:\n train_points: `[b, n, d]` interpolation centers\n train_values: `[b, n, k]` function values\n order: order of the interpolation\n regularization_weight: weight to place on smoothness regularization term\n\n Returns:\n w: `[b, n, k]` weights on each interpolation center\n v: `[b, d, k]` weights on each input dimension\n Raises:\n ValueError: if d or k is not fully specified.\n \"\"\"\n\n # These dimensions are set dynamically at runtime.\n b, n, _ = tf.unstack(tf.shape(train_points), num=3)\n\n d = train_points.shape[-1]\n if d is None:\n raise ValueError('The dimensionality of the input points (d) must be '\n 'statically-inferrable.')\n\n k = train_values.shape[-1]\n if k is None:\n raise ValueError('The dimensionality of the output values (k) must be '\n 'statically-inferrable.')\n\n # First, rename variables so that the notation (c, f, w, v, A, B, etc.)\n # follows https://en.wikipedia.org/wiki/Polyharmonic_spline.\n # To account for python style guidelines we use\n # matrix_a for A and matrix_b for B.\n\n c = train_points\n f = train_values\n\n # Next, construct the linear system.\n with tf.name_scope('construct_linear_system'):\n\n matrix_a = _phi(_pairwise_squared_distance_matrix(c),\n order) # [b, n, n]\n if regularization_weight > 0:\n batch_identity_matrix = tf.expand_dims(tf.eye(n, dtype=c.dtype), 0)\n matrix_a += regularization_weight * batch_identity_matrix\n\n # Append ones to the feature values for the bias term\n # in the linear model.\n ones = tf.ones_like(c[..., :1], dtype=c.dtype)\n matrix_b = tf.concat([c, ones], 2) # [b, n, d + 1]\n\n # [b, n + d + 1, n]\n left_block = tf.concat(\n [matrix_a, tf.transpose(matrix_b, [0, 2, 1])], 1)\n\n num_b_cols = matrix_b.get_shape()[2] # d + 1\n lhs_zeros = tf.zeros([b, num_b_cols, num_b_cols], train_points.dtype)\n right_block = tf.concat([matrix_b, lhs_zeros],\n 1) # [b, n + d + 1, d + 1]\n lhs = tf.concat([left_block, right_block],\n 2) # [b, n + d + 1, n + d + 1]\n\n rhs_zeros = tf.zeros([b, d + 1, k], train_points.dtype)\n rhs = tf.concat([f, rhs_zeros], 1) # [b, n + d + 1, k]\n\n # Then, solve the linear system and unpack the results.\n with tf.name_scope('solve_linear_system'):\n w_v = tf.linalg.solve(lhs, rhs)\n w = w_v[:, :n, :]\n v = w_v[:, n:, :]\n\n return w, v\n\n\ndef _apply_interpolation(query_points, train_points, w, v, order):\n \"\"\"Apply polyharmonic interpolation model to data.\n\n Given coefficients w and v for the interpolation model, we evaluate\n interpolated function values at query_points.\n\n Args:\n query_points: `[b, m, d]` x values to evaluate the interpolation at\n train_points: `[b, n, d]` x values that 
act as the interpolation centers\n ( the c variables in the wikipedia article)\n w: `[b, n, k]` weights on each interpolation center\n v: `[b, d, k]` weights on each input dimension\n order: order of the interpolation\n\n Returns:\n Polyharmonic interpolation evaluated at points defined in query_points.\n \"\"\"\n\n # First, compute the contribution from the rbf term.\n pairwise_dists = _cross_squared_distance_matrix(query_points, train_points)\n phi_pairwise_dists = _phi(pairwise_dists, order)\n\n rbf_term = tf.matmul(phi_pairwise_dists, w)\n\n # Then, compute the contribution from the linear term.\n # Pad query_points with ones, for the bias term in the linear model.\n query_points_pad = tf.concat([\n query_points,\n tf.ones_like(query_points[..., :1], train_points.dtype)\n ], 2)\n linear_term = tf.matmul(query_points_pad, v)\n\n return rbf_term + linear_term\n\n\ndef _phi(r, order):\n \"\"\"Coordinate-wise nonlinearity used to define the order of the\n interpolation.\n\n See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.\n\n Args:\n r: input op\n order: interpolation order\n\n Returns:\n phi_k evaluated coordinate-wise on r, for k = r\n \"\"\"\n\n # using EPSILON prevents log(0), sqrt0), etc.\n # sqrt(0) is well-defined, but its gradient is not\n with tf.name_scope('phi'):\n if order == 1:\n r = tf.maximum(r, EPSILON)\n r = tf.sqrt(r)\n return r\n elif order == 2:\n return 0.5 * r * tf.math.log(tf.maximum(r, EPSILON))\n elif order == 4:\n return 0.5 * tf.square(r) * tf.math.log(tf.maximum(r, EPSILON))\n elif order % 2 == 0:\n r = tf.maximum(r, EPSILON)\n return 0.5 * tf.pow(r, 0.5 * order) * tf.math.log(r)\n else:\n r = tf.maximum(r, EPSILON)\n return tf.pow(r, 0.5 * order)\n\n\ndef interpolate_spline(train_points,\n train_values,\n query_points,\n order,\n regularization_weight=0.0,\n name='interpolate_spline'):\n r\"\"\"Interpolate signal using polyharmonic interpolation.\n\n The interpolant has the form\n $$f(x) = \\sum_{i = 1}^n w_i \\phi(||x - c_i||) + v^T x + b.$$\n\n This is a sum of two terms: (1) a weighted sum of radial basis function\n (RBF) terms, with the centers \\\\(c_1, ... c_n\\\\), and (2) a linear term\n with a bias. The \\\\(c_i\\\\) vectors are 'training' points.\n In the code, b is absorbed into v\n by appending 1 as a final dimension to x. The coefficients w and v are\n estimated such that the interpolant exactly fits the value of the function\n at the \\\\(c_i\\\\) points, the vector w is orthogonal to each \\\\(c_i\\\\),\n and the vector w sums to 0. With these constraints, the coefficients\n can be obtained by solving a linear system.\n\n \\\\(\\phi\\\\) is an RBF, parametrized by an interpolation\n order. Using order=2 produces the well-known thin-plate spline.\n\n We also provide the option to perform regularized interpolation. Here, the\n interpolant is selected to trade off between the squared loss on the\n training data and a certain measure of its curvature\n ([details](https://en.wikipedia.org/wiki/Polyharmonic_spline)).\n Using a regularization weight greater than zero has the effect that the\n interpolant will no longer exactly fit the training data. However, it may\n be less vulnerable to overfitting, particularly for high-order\n interpolation.\n\n Note the interpolation procedure is differentiable with respect to all\n inputs besides the order parameter.\n\n We support dynamically-shaped inputs, where batch_size, n, and m are None\n at graph construction time. 
However, d and k must be known.\n\n Args:\n train_points: `[batch_size, n, d]` float `Tensor` of n d-dimensional\n locations. These do not need to be regularly-spaced.\n train_values: `[batch_size, n, k]` float `Tensor` of n c-dimensional\n values evaluated at train_points.\n query_points: `[batch_size, m, d]` `Tensor` of m d-dimensional locations\n where we will output the interpolant's values.\n order: order of the interpolation. Common values are 1 for\n \\\\(\\phi(r) = r\\\\), 2 for \\\\(\\phi(r) = r^2 * log(r)\\\\)\n (thin-plate spline), or 3 for \\\\(\\phi(r) = r^3\\\\).\n regularization_weight: weight placed on the regularization term.\n This will depend substantially on the problem, and it should always be\n tuned. For many problems, it is reasonable to use no regularization.\n If using a non-zero value, we recommend a small value like 0.001.\n name: name prefix for ops created by this function\n\n Returns:\n `[b, m, k]` float `Tensor` of query values. We use train_points and\n train_values to perform polyharmonic interpolation. The query values are\n the values of the interpolant evaluated at the locations specified in\n query_points.\n \"\"\"\n with tf.name_scope(name or \"interpolate_spline\"):\n train_points = tf.convert_to_tensor(train_points)\n train_values = tf.convert_to_tensor(train_values)\n query_points = tf.convert_to_tensor(query_points)\n\n # First, fit the spline to the observed data.\n with tf.name_scope('solve'):\n w, v = _solve_interpolation(train_points, train_values, order,\n regularization_weight)\n\n # Then, evaluate the spline at the query locations.\n with tf.name_scope('predict'):\n query_values = _apply_interpolation(query_points, train_points, w,\n v, order)\n\n return query_values\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"An implementation of the Stochastic Weight Averaging optimizer.\n\nThe Stochastic Weight Averaging mechanism was proposed by Pavel Izmailov\net. al in the paper [Averaging Weights Leads to Wider Optima and Better\nGeneralization](https://arxiv.org/abs/1803.05407). The optimizer\nimplements averaging of multiple points along the trajectory of SGD.\nThis averaging has shown to improve model performance on validation/test\nsets whilst possibly causing a small increase in loss on the training\nset.\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow_addons.optimizers.average_wrapper import AveragedOptimizerWrapper\n\n\[email protected]_keras_serializable(package='Addons')\nclass SWA(AveragedOptimizerWrapper):\n \"\"\"This class extends optimizers with Stochastic Weight Averaging (SWA).\n\n The Stochastic Weight Averaging mechanism was proposed by Pavel Izmailov\n et. al in the paper [Averaging Weights Leads to Wider Optima and\n Better Generalization](https://arxiv.org/abs/1803.05407). The optimizer\n implements averaging of multiple points along the trajectory of SGD. The\n optimizer expects an inner optimizer which will be used to apply the\n gradients to the variables and itself computes a running average of the\n variables every `k` steps (which generally corresponds to the end\n of a cycle when a cyclic learning rate is employed).\n\n We also allow the specification of the number of steps averaging\n should first happen after. Let's say, we want averaging to happen every `k`\n steps after the first `m` steps. After step `m` we'd take a snapshot of the\n variables and then average the weights appropriately at step `m + k`,\n `m + 2k` and so on. The assign_average_vars function can be called at the\n end of training to obtain the averaged_weights from the optimizer.\n\n Note: If your model has batch-normalization layers you would need to run\n the final weights through the data to compute the running mean and\n variance corresponding to the activations for each layer of the network.\n From the paper: If the DNN uses batch normalization we run one\n additional pass over the data, to compute the running mean and standard\n deviation of the activations for each layer of the network with SWA\n weights after the training is finished, since these statistics are not\n collected during training. 
For most deep learning libraries, such as\n PyTorch or Tensorflow, one can typically collect these statistics by\n making a forward pass over the data in training mode\n ([Averaging Weights Leads to Wider Optima and Better\n Generalization](https://arxiv.org/abs/1803.05407))\n\n Example of usage:\n\n ```python\n opt = tf.keras.optimizers.SGD(learning_rate)\n opt = tfa.optimizers.SWA(opt, start_averaging=m, average_period=k)\n ```\n \"\"\"\n\n def __init__(self,\n optimizer,\n start_averaging=0,\n average_period=10,\n name='SWA',\n sequential_update=True,\n **kwargs):\n r\"\"\"Wrap optimizer with the Stochastic Weight Averaging mechanism.\n\n Args:\n optimizer: The original optimizer that will be used to compute and\n apply the gradients.\n start_averaging: An integer. Threshold to start averaging using \n SWA. Averaging only occurs at `start_averaging` iters, must\n be >= 0. If start_averaging = m, the first snapshot will be \n taken after the mth application of gradients (where the first\n iteration is iteration 0).\n average_period: An integer. The synchronization period of SWA. The\n averaging occurs every average_period steps. Averaging period\n needs to be >= 1.\n name: Optional name for the operations created when applying\n gradients. Defaults to 'SWA'.\n sequential_update: Bool. If False, will compute the moving average\n at the same time as the model is updated, potentially doing\n benign data races. If True, will update the moving average\n after gradient updates.\n **kwargs: keyword arguments. Allowed to be {`clipnorm`, \n `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by \n norm; `clipvalue` is clip gradients by value, `decay` is \n included for backward compatibility to allow time inverse \n decay of learning rate. `lr` is included for backward \n compatibility, recommended to use `learning_rate` instead.\n \"\"\"\n super().__init__(optimizer, sequential_update, name, **kwargs)\n\n if average_period < 1:\n raise ValueError('average_period must be >= 1')\n if start_averaging < 0:\n raise ValueError('start_averaging must be >= 0')\n\n self._set_hyper('average_period', average_period)\n self._set_hyper('start_averaging', start_averaging)\n\n def average_op(self, var, average_var):\n average_period = self._get_hyper('average_period', tf.dtypes.int64)\n start_averaging = self._get_hyper('start_averaging', tf.dtypes.int64)\n # check if the correct number of iterations has taken place to start\n # averaging.\n thresold_cond = tf.greater_equal(self.iterations, start_averaging)\n # number of times snapshots of weights have been taken (using max to\n # avoid negative values of num_snapshots).\n num_snapshots = tf.math.maximum(\n tf.cast(0, tf.int64),\n tf.math.floordiv(self.iterations - start_averaging,\n average_period))\n # checks if the iteration is one in which a snapshot should be taken.\n sync_cond = tf.equal(start_averaging + num_snapshots * average_period,\n self.iterations)\n num_snapshots = tf.cast(num_snapshots, tf.float32)\n average_value = (\n (average_var * num_snapshots + var) / (num_snapshots + 1.))\n average_cond = tf.reduce_all([thresold_cond, sync_cond])\n with tf.control_dependencies([average_value]):\n average_update = average_var.assign(\n tf.where(\n average_cond,\n average_value,\n average_var,\n ),\n use_locking=self._use_locking)\n return average_update\n\n def get_config(self):\n config = {\n 'average_period': self._serialize_hyperparameter('average_period'),\n 'start_averaging':\n self._serialize_hyperparameter('start_averaging')\n }\n base_config 
= super().get_config()\n return {**base_config, **config}\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.addons.seq2seq.python.loss_ops.\"\"\"\n\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom tensorflow_addons.seq2seq import loss\nfrom tensorflow_addons.utils import test_utils\n\n\n@test_utils.run_all_in_graph_and_eager_modes\nclass LossTest(tf.test.TestCase):\n def setup(self):\n self.batch_size = 2\n self.sequence_length = 3\n self.number_of_classes = 5\n logits = [\n tf.constant(\n i + 0.5, shape=[self.batch_size, self.number_of_classes])\n for i in range(self.sequence_length)\n ]\n self.logits = tf.stack(logits, axis=1)\n targets = [\n tf.constant(i, tf.int32, shape=[self.batch_size])\n for i in range(self.sequence_length)\n ]\n self.targets = tf.stack(targets, axis=1)\n weights = [\n tf.constant(1.0, shape=[self.batch_size])\n for _ in range(self.sequence_length)\n ]\n self.weights = tf.stack(weights, axis=1)\n # expected_loss = sparse_softmax_cross_entropy_with_logits(targets,\n # logits) where targets = [0, 1, 2],\n # and logits = [[0.5] * 5, [1.5] * 5, [2.5] * 5]\n self.expected_loss = 1.60944\n\n def testSequenceLoss(self):\n with self.cached_session(use_gpu=True):\n self.setup()\n average_loss_per_example = loss.sequence_loss(\n self.logits,\n self.targets,\n self.weights,\n average_across_timesteps=True,\n average_across_batch=True)\n res = self.evaluate(average_loss_per_example)\n self.assertAllClose(self.expected_loss, res)\n\n average_loss_per_sequence = loss.sequence_loss(\n self.logits,\n self.targets,\n self.weights,\n average_across_timesteps=False,\n average_across_batch=True)\n res = self.evaluate(average_loss_per_sequence)\n compare_per_sequence = np.full((self.sequence_length),\n self.expected_loss)\n self.assertAllClose(compare_per_sequence, res)\n\n average_loss_per_batch = loss.sequence_loss(\n self.logits,\n self.targets,\n self.weights,\n average_across_timesteps=True,\n average_across_batch=False)\n res = self.evaluate(average_loss_per_batch)\n compare_per_batch = np.full((self.batch_size), self.expected_loss)\n self.assertAllClose(compare_per_batch, res)\n\n total_loss = loss.sequence_loss(\n self.logits,\n self.targets,\n self.weights,\n average_across_timesteps=False,\n average_across_batch=False)\n res = self.evaluate(total_loss)\n compare_total = np.full((self.batch_size, self.sequence_length),\n self.expected_loss)\n self.assertAllClose(compare_total, res)\n\n def testSequenceLossClass(self):\n with self.cached_session(use_gpu=True):\n self.setup()\n seq_loss = loss.SequenceLoss(\n average_across_timesteps=True,\n average_across_batch=True,\n sum_over_timesteps=False,\n sum_over_batch=False)\n average_loss_per_example = seq_loss(self.targets, self.logits,\n self.weights)\n res = self.evaluate(average_loss_per_example)\n self.assertAllClose(self.expected_loss, res)\n\n seq_loss = loss.SequenceLoss(\n average_across_timesteps=False,\n 
average_across_batch=True,\n sum_over_timesteps=False,\n sum_over_batch=False)\n average_loss_per_sequence = seq_loss(self.targets, self.logits,\n self.weights)\n res = self.evaluate(average_loss_per_sequence)\n compare_per_sequence = np.full((self.sequence_length),\n self.expected_loss)\n self.assertAllClose(compare_per_sequence, res)\n\n seq_loss = loss.SequenceLoss(\n average_across_timesteps=True,\n average_across_batch=False,\n sum_over_timesteps=False,\n sum_over_batch=False)\n average_loss_per_batch = seq_loss(self.targets, self.logits,\n self.weights)\n res = self.evaluate(average_loss_per_batch)\n compare_per_batch = np.full((self.batch_size), self.expected_loss)\n self.assertAllClose(compare_per_batch, res)\n\n seq_loss = loss.SequenceLoss(\n average_across_timesteps=False,\n average_across_batch=False,\n sum_over_timesteps=False,\n sum_over_batch=False)\n total_loss = seq_loss(self.targets, self.logits, self.weights)\n res = self.evaluate(total_loss)\n compare_total = np.full((self.batch_size, self.sequence_length),\n self.expected_loss)\n self.assertAllClose(compare_total, res)\n\n def testSumReduction(self):\n with self.cached_session(use_gpu=True):\n self.setup()\n seq_loss = loss.SequenceLoss(\n average_across_timesteps=False,\n average_across_batch=False,\n sum_over_timesteps=True,\n sum_over_batch=True)\n average_loss_per_example = seq_loss(self.targets, self.logits,\n self.weights)\n res = self.evaluate(average_loss_per_example)\n self.assertAllClose(self.expected_loss, res)\n\n seq_loss = loss.SequenceLoss(\n average_across_timesteps=False,\n average_across_batch=False,\n sum_over_timesteps=False,\n sum_over_batch=True)\n average_loss_per_sequence = seq_loss(self.targets, self.logits,\n self.weights)\n res = self.evaluate(average_loss_per_sequence)\n compare_per_sequence = np.full((self.sequence_length),\n self.expected_loss)\n self.assertAllClose(compare_per_sequence, res)\n\n seq_loss = loss.SequenceLoss(\n average_across_timesteps=False,\n average_across_batch=False,\n sum_over_timesteps=True,\n sum_over_batch=False)\n average_loss_per_batch = seq_loss(self.targets, self.logits,\n self.weights)\n res = self.evaluate(average_loss_per_batch)\n compare_per_batch = np.full((self.batch_size), self.expected_loss)\n self.assertAllClose(compare_per_batch, res)\n\n seq_loss = loss.SequenceLoss(\n average_across_timesteps=False,\n average_across_batch=False,\n sum_over_timesteps=False,\n sum_over_batch=False)\n total_loss = seq_loss(self.targets, self.logits, self.weights)\n res = self.evaluate(total_loss)\n compare_total = np.full((self.batch_size, self.sequence_length),\n self.expected_loss)\n self.assertAllClose(compare_total, res)\n\n def testWeightedSumReduction(self):\n self.setup()\n weights = [\n tf.constant(1.0, shape=[self.batch_size])\n for _ in range(self.sequence_length)\n ]\n # Make the last element in the sequence to have zero weights.\n weights[-1] = tf.constant(0.0, shape=[self.batch_size])\n self.weights = tf.stack(weights, axis=1)\n with self.cached_session(use_gpu=True):\n seq_loss = loss.SequenceLoss(\n average_across_timesteps=False,\n average_across_batch=False,\n sum_over_timesteps=True,\n sum_over_batch=True)\n average_loss_per_example = seq_loss(self.targets, self.logits,\n self.weights)\n res = self.evaluate(average_loss_per_example)\n self.assertAllClose(self.expected_loss, res)\n\n seq_loss = loss.SequenceLoss(\n average_across_timesteps=False,\n average_across_batch=False,\n sum_over_timesteps=False,\n sum_over_batch=True)\n 
average_loss_per_sequence = seq_loss(self.targets, self.logits,\n self.weights)\n res = self.evaluate(average_loss_per_sequence)\n compare_per_sequence = np.full((self.sequence_length),\n self.expected_loss)\n # The last element in every sequence are zeros, which will be\n # filtered.\n compare_per_sequence[-1] = 0.\n self.assertAllClose(compare_per_sequence, res)\n\n seq_loss = loss.SequenceLoss(\n average_across_timesteps=False,\n average_across_batch=False,\n sum_over_timesteps=True,\n sum_over_batch=False)\n average_loss_per_batch = seq_loss(self.targets, self.logits,\n self.weights)\n res = self.evaluate(average_loss_per_batch)\n compare_per_batch = np.full((self.batch_size), self.expected_loss)\n self.assertAllClose(compare_per_batch, res)\n\n seq_loss = loss.SequenceLoss(\n average_across_timesteps=False,\n average_across_batch=False,\n sum_over_timesteps=False,\n sum_over_batch=False)\n total_loss = seq_loss(self.targets, self.logits, self.weights)\n res = self.evaluate(total_loss)\n compare_total = np.full((self.batch_size, self.sequence_length),\n self.expected_loss)\n # The last element in every sequence are zeros, which will be\n # filtered.\n compare_total[:, -1] = 0\n self.assertAllClose(compare_total, res)\n\n def testZeroWeights(self):\n self.setup()\n weights = [\n tf.constant(0.0, shape=[self.batch_size])\n for _ in range(self.sequence_length)\n ]\n weights = tf.stack(weights, axis=1)\n with self.test_session(use_gpu=True):\n average_loss_per_example = loss.sequence_loss(\n self.logits,\n self.targets,\n weights,\n average_across_timesteps=True,\n average_across_batch=True)\n res = self.evaluate(average_loss_per_example)\n self.assertAllClose(0.0, res)\n\n average_loss_per_sequence = loss.sequence_loss(\n self.logits,\n self.targets,\n weights,\n average_across_timesteps=False,\n average_across_batch=True)\n res = self.evaluate(average_loss_per_sequence)\n compare_per_sequence = np.zeros(self.sequence_length)\n self.assertAllClose(compare_per_sequence, res)\n\n average_loss_per_batch = loss.sequence_loss(\n self.logits,\n self.targets,\n weights,\n average_across_timesteps=True,\n average_across_batch=False)\n res = self.evaluate(average_loss_per_batch)\n compare_per_batch = np.zeros(self.batch_size)\n self.assertAllClose(compare_per_batch, res)\n\n total_loss = loss.sequence_loss(\n self.logits,\n self.targets,\n weights,\n average_across_timesteps=False,\n average_across_batch=False)\n res = self.evaluate(total_loss)\n compare_total = np.zeros((self.batch_size, self.sequence_length))\n self.assertAllClose(compare_total, res)\n\n def testAmbiguousOrder(self):\n with self.assertRaisesRegexp(ValueError, 'because of ambiguous order'):\n with self.cached_session(use_gpu=True):\n self.setup()\n seq_loss = loss.SequenceLoss(\n average_across_timesteps=False,\n average_across_batch=True,\n sum_over_timesteps=True,\n sum_over_batch=False)\n self.evaluate(\n seq_loss(self.targets, self.logits, self.weights))\n\n\n@test_utils.run_all_in_graph_and_eager_modes\nclass DenseTargetLossTest(LossTest):\n def setup(self):\n super().setup()\n self.targets = tf.one_hot(self.targets, depth=self.number_of_classes)\n\n def testKerasCompatibility(self):\n \"\"\"To test the compatibility of SequenceLoss with Keras's built-in\n training loops, we create a fake model which always outputs a pre-\n defined set of logits.\n\n Then we check the calculated loss to be equal to the expected\n loss. 
Note that since the fake model doesn't have any trainable\n parameters, no matter how many steps we train it, it always\n outputs the same loss value.\n \"\"\"\n with self.cached_session(use_gpu=True):\n self.setup()\n\n def return_logits(x):\n batch_size = tf.shape(x)[0]\n logits_single_row = self.logits[0, :, :]\n logits_batch = tf.tile(\n tf.expand_dims(logits_single_row, 0), [batch_size, 1, 1])\n return logits_batch\n\n inp = tf.keras.layers.Input(shape=(self.sequence_length,))\n out = tf.keras.layers.Lambda(\n return_logits,\n output_shape=(self.sequence_length,\n self.number_of_classes))(inp)\n model = tf.keras.models.Model(inp, out)\n\n loss_obj = loss.SequenceLoss()\n model.compile(\n optimizer='adam', loss=loss_obj, sample_weight_mode=\"temporal\")\n\n # This is a fake input.\n x = tf.ones(shape=(self.batch_size, self.sequence_length))\n\n h = model.fit(\n x,\n self.targets,\n sample_weight=self.weights,\n batch_size=self.batch_size,\n steps_per_epoch=1)\n\n calculated_loss = h.history['loss'][0]\n self.assertAllClose(calculated_loss, self.expected_loss)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for PoincareNormalize layer.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_addons.layers.poincare import PoincareNormalize\nfrom tensorflow_addons.utils import test_utils\n\n\n@test_utils.run_all_in_graph_and_eager_modes\nclass PoincareNormalizeTest(tf.test.TestCase):\n def _PoincareNormalize(self, x, dim, epsilon=1e-5):\n if isinstance(dim, list):\n norm = np.linalg.norm(x, axis=tuple(dim))\n for d in dim:\n norm = np.expand_dims(norm, d)\n norm_x = ((1. - epsilon) * x) / norm\n else:\n norm = np.expand_dims(\n np.apply_along_axis(np.linalg.norm, dim, x), dim)\n norm_x = ((1. - epsilon) * x) / norm\n return np.where(norm > 1.0 - epsilon, norm_x, x)\n\n def testPoincareNormalize(self):\n x_shape = [20, 7, 3]\n epsilon = 1e-5\n tol = 1e-6\n np.random.seed(1)\n inputs = np.random.random_sample(x_shape).astype(np.float32)\n\n for dim in range(len(x_shape)):\n outputs_expected = self._PoincareNormalize(inputs, dim, epsilon)\n\n outputs = test_utils.layer_test(\n PoincareNormalize,\n kwargs={\n 'axis': dim,\n 'epsilon': epsilon\n },\n input_data=inputs,\n expected_output=outputs_expected)\n for y in outputs_expected, outputs:\n norm = np.linalg.norm(y, axis=dim)\n self.assertLessEqual(norm.max(), 1. - epsilon + tol)\n\n def testPoincareNormalizeDimArray(self):\n x_shape = [20, 7, 3]\n epsilon = 1e-5\n tol = 1e-6\n np.random.seed(1)\n inputs = np.random.random_sample(x_shape).astype(np.float32)\n dim = [1, 2]\n\n outputs_expected = self._PoincareNormalize(inputs, dim, epsilon)\n\n outputs = test_utils.layer_test(\n PoincareNormalize,\n kwargs={\n 'axis': dim,\n 'epsilon': epsilon\n },\n input_data=inputs,\n expected_output=outputs_expected)\n for y in outputs_expected, outputs:\n norm = np.linalg.norm(y, axis=tuple(dim))\n self.assertLessEqual(norm.max(), 1. - epsilon + tol)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements quantiles losses.\"\"\"\n\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='Addons')\[email protected]\ndef pinball_loss(y_true, y_pred, tau=.5):\n \"\"\"Computes the pinball loss between `y_true` and `y_pred`.\n\n `loss = maximum(tau * (y_true - y_pred), (tau - 1) * (y_true - y_pred))`\n\n In the context of regression this, loss yields an estimator of the tau\n conditional quantile.\n\n See: https://en.wikipedia.org/wiki/Quantile_regression\n\n Usage:\n ```python\n loss = pinball_loss([0., 0., 1., 1.], [1., 1., 1., 0.], tau=.1)\n\n # loss = max(0.1 * (y_true - y_pred), (0.1 - 1) * (y_true - y_pred))\n # = (0.9 + 0.9 + 0 + 0.1) / 4\n\n print('Loss: ', loss.numpy()) # Loss: 0.475\n ```\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`\n tau: (Optional) Float in [0, 1] or a tensor taking values in [0, 1] and\n shape = `[d0,..., dn]`. It defines the slope of the pinball loss. In\n the context of quantile regression, the value of tau determines the\n conditional quantile level. When tau = 0.5, this amounts to l1\n regression, an estimator of the conditional median (0.5 quantile).\n\n Returns:\n pinball_loss: 1-D float `Tensor` with shape [batch_size].\n\n References:\n - https://en.wikipedia.org/wiki/Quantile_regression\n - https://projecteuclid.org/download/pdfview_1/euclid.bj/1297173840\n \"\"\"\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n\n # broadcast the pinball slope along the batch dimension, and clip to\n # acceptable values\n tau = tf.expand_dims(tf.cast(tau, y_pred.dtype), 0)\n one = tf.cast(1, tau.dtype)\n\n delta_y = y_true - y_pred\n pinball = tf.math.maximum(tau * delta_y, (tau - one) * delta_y)\n return tf.reduce_mean(tf.keras.backend.batch_flatten(pinball), axis=-1)\n\n\[email protected]_keras_serializable(package='Addons')\nclass PinballLoss(tf.keras.losses.Loss):\n \"\"\"Computes the pinball loss between `y_true` and `y_pred`.\n\n `loss = maximum(tau * (y_true - y_pred), (tau - 1) * (y_true - y_pred))`\n\n In the context of regression, this loss yields an estimator of the tau\n conditional quantile.\n\n See: https://en.wikipedia.org/wiki/Quantile_regression\n\n Usage:\n ```python\n pinball = tfa.losses.PinballLoss(tau=.1)\n loss = pinball([0., 0., 1., 1.], [1., 1., 1., 0.])\n\n # loss = max(0.1 * (y_true - y_pred), (0.1 - 1) * (y_true - y_pred))\n # = (0.9 + 0.9 + 0 + 0.1) / 4\n\n print('Loss: ', loss.numpy()) # Loss: 0.475\n ```\n\n Usage with the `compile` API:\n\n ```python\n model = tf.keras.Model(inputs, outputs)\n model.compile('sgd', loss=tfa.losses.PinballLoss(tau=.1))\n ```\n\n Args:\n tau: (Optional) Float in [0, 1] or a tensor taking values in [0, 1] and\n shape = `[d0,..., dn]`. 
It defines the slope of the pinball loss. In\n the context of quantile regression, the value of tau determines the\n conditional quantile level. When tau = 0.5, this amounts to l1\n regression, an estimator of the conditional median (0.5 quantile).\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`.\n When used with `tf.distribute.Strategy`, outside of built-in training\n loops such as `tf.keras` `compile` and `fit`, using `AUTO` or\n `SUM_OVER_BATCH_SIZE` will raise an error. Please see\n https://www.tensorflow.org/alpha/tutorials/distribute/training_loops\n for more details on this.\n name: Optional name for the op.\n\n References:\n - https://en.wikipedia.org/wiki/Quantile_regression\n - https://projecteuclid.org/download/pdfview_1/euclid.bj/1297173840\n \"\"\"\n\n def __init__(self,\n tau=.5,\n reduction=tf.keras.losses.Reduction.AUTO,\n name='pinball_loss'):\n super().__init__(reduction=reduction, name=name)\n self.tau = tau\n\n def call(self, y_true, y_pred):\n return pinball_loss(y_true, y_pred, self.tau)\n\n def get_config(self):\n config = {\n 'tau': self.tau,\n }\n base_config = super().get_config()\n return {**base_config, **config}\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.matmul",
"tensorflow.linalg.diag_part",
"tensorflow.transpose",
"tensorflow.concat",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow.maximum",
"tensorflow.pow",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.eye",
"tensorflow.math.log",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.sqrt",
"tensorflow.linalg.solve"
],
[
"tensorflow.control_dependencies",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.math.floordiv",
"tensorflow.where",
"tensorflow.greater_equal",
"tensorflow.reduce_all"
],
[
"tensorflow.constant",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.models.Model",
"tensorflow.shape",
"tensorflow.stack",
"tensorflow.test.main",
"tensorflow.ones",
"numpy.full",
"tensorflow.expand_dims",
"tensorflow.one_hot",
"numpy.zeros",
"tensorflow.keras.layers.Input"
],
[
"numpy.expand_dims",
"numpy.random.seed",
"tensorflow.test.main",
"numpy.random.random_sample",
"numpy.linalg.norm",
"numpy.apply_along_axis",
"numpy.where"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.keras.backend.batch_flatten",
"tensorflow.cast",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.math.maximum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
awesome-archive/pycorrector | [
"022da83ab794d9f9ddc40caef67b0578e7e3f513"
] | [
"pycorrector/seq2seq/infer.py"
] | [
"# -*- coding: utf-8 -*-\n# Author: XuMing <[email protected]>\n# Brief: \n\nimport numpy as np\nimport tensorflow as tf\nfrom keras.models import load_model\n\nfrom pycorrector.seq2seq import cged_config as config\nfrom pycorrector.seq2seq.corpus_reader import CGEDReader, load_word_dict\nfrom pycorrector.seq2seq.reader import EOS_TOKEN, GO_TOKEN\nfrom pycorrector.utils.io_utils import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass Infer(object):\n def __init__(self, config=None):\n train_path = config.train_path\n encoder_model_path = config.encoder_model_path\n decoder_model_path = config.decoder_model_path\n save_input_token_path = config.input_vocab_path\n save_target_token_path = config.target_vocab_path\n\n # load dict\n self.input_token_index = load_word_dict(save_input_token_path)\n self.target_token_index = load_word_dict(save_target_token_path)\n\n data_reader = CGEDReader(train_path)\n input_texts, target_texts = data_reader.build_dataset(train_path)\n self.max_input_texts_len = max([len(text) for text in input_texts])\n self.max_target_texts_len = max([len(text) for text in target_texts])\n logger.info(\"Data loaded.\")\n\n # load model\n self.encoder_model = load_model(encoder_model_path)\n self.decoder_model = load_model(decoder_model_path)\n logger.info(\"Loaded seq2seq model.\")\n self.graph = tf.get_default_graph()\n\n def _decode_sequence(self, encoder_input_data):\n decoded_sentence = ''\n with self.graph.as_default():\n # Encode the input as state vectors.\n states_value = self.encoder_model.predict(encoder_input_data)\n\n # Generate empty target sequence of length 1.\n target_seq = np.zeros((1, 1, len(self.target_token_index)))\n # Populate the first character of target sequence with the start character.\n # first_char = encoder_input_data[0]\n target_seq[0, 0, self.target_token_index[GO_TOKEN]] = 1.0\n\n reverse_target_char_index = dict(\n (i, char) for char, i in self.target_token_index.items())\n\n for _ in range(self.max_target_texts_len):\n output_tokens, h, c = self.decoder_model.predict([target_seq] + states_value)\n # Sample a token\n sampled_token_index = np.argmax(output_tokens[0, -1, :])\n sampled_char = reverse_target_char_index[sampled_token_index]\n # Exit condition: either hit max length\n # or find stop character.\n if sampled_char == EOS_TOKEN:\n break\n decoded_sentence += sampled_char\n # Update the target sequence (of length 1).\n target_seq = np.zeros((1, 1, len(self.target_token_index)))\n target_seq[0, 0, sampled_token_index] = 1.0\n # Update states\n states_value = [h, c]\n return decoded_sentence\n\n def infer(self, input_text):\n encoder_input_data = np.zeros((1, self.max_input_texts_len, len(self.input_token_index)),\n dtype='float32')\n # one hot representation\n for i, char in enumerate(input_text):\n if char in self.input_token_index:\n encoder_input_data[0, i, self.input_token_index[char]] = 1.0\n # Take one sequence decoding.\n decoded_sentence = self._decode_sequence(encoder_input_data)\n logger.info('Input sentence:%s' % input_text)\n logger.info('Decoded sentence:%s' % decoded_sentence)\n\n\nif __name__ == \"__main__\":\n inference = Infer(config=config)\n inputs = [\n '由我起开始做。',\n '没有解决这个问题,',\n '由我起开始做。',\n '由我起开始做',\n '不能人类实现更美好的将来。',\n '这几年前时间,',\n '歌曲使人的感到快乐,',\n ]\n for i in inputs:\n inference.infer(i)\n\n while True:\n input_str = input('input your string:')\n inference.infer(input_str)\n"
] | [
[
"tensorflow.get_default_graph",
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
rahulvigneswaran/TailCalibX | [
"0ed18cc8903715c0e31934c54226a53b1bbfc198"
] | [
"libs/models/DotProductClassifier.py"
] | [
"# Imports\nimport torch.nn as nn\nfrom os import path\nimport torch\nimport torch.nn.functional as F\n\nclass DotProduct_Classifier(nn.Module):\n def __init__(self, num_classes=1000, feat_dim=2048, *args):\n super(DotProduct_Classifier, self).__init__()\n self.fc = nn.Linear(feat_dim, num_classes)\n\n def forward(self, x, *args):\n x = self.fc(x)\n return x\n\n\ndef create_model(feat_dim, num_classes=1000, pretrain=False, pretrain_dir=None, *args):\n \"\"\"Initialize the model\n\n Args:\n feat_dim (int): output dimension of the previous feature extractor\n num_classes (int, optional): Number of classes. Defaults to 1000.\n\n Returns:\n Class: Model\n \"\"\"\n print(\"Loading Dot Product Classifier.\")\n clf = DotProduct_Classifier(num_classes, feat_dim)\n\n if pretrain:\n if path.exists(pretrain_dir):\n print(\"===> Load Pretrain Initialization for DotProductClassfier\")\n weights = torch.load(pretrain_dir)[\"state_dict_best\"][\"classifier\"]\n\n weights = {\n k: weights[\"module.\" + k]\n if \"module.\" + k in weights\n else clf.state_dict()[k]\n for k in clf.state_dict()\n }\n clf.load_state_dict(weights)\n else: \n raise Exception(f\"Pretrain path doesn't exist!!--{pretrain_dir}\")\n else:\n print(\"===> Train classifier from the scratch\")\n\n return clf\n"
] | [
[
"torch.nn.Linear",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Pavivenkatesan/TicTacToe-RL-MM- | [
"fbaab6bb9574b82ae0d79c818ba74d049375bfd4"
] | [
"testing.py"
] | [
"import numpy as np\nfrom math import inf as infinity\nfrom itertools import product\nfrom collections import defaultdict\nimport random\nimport time\n\n# Initializing the Tic-Tac-Toe environment\n# Three rows-Three columns, creating an empty list of three empty lists\nstate_space = [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']]\n# No. of players = 2 : X & O\nplayers = ['X', 'O']\n\n\n# Defining the play state_value, player and the cell number\ndef play(sv, each_player, cell):\n if sv[int((cell - 1) / 3)][(cell - 1) % 3] is ' ':\n sv[int((cell - 1) / 3)][(cell - 1) % 3] = each_player\n else:\n cell = int(input(\" Choose again, Cell is not empty: \"))\n play(sv, each_player, cell)\n\n\n# Defining new state function: which traverse over rows and columns and returns new state\ndef new(state):\n ns = [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']]\n for i in range(3):\n for j in range(3):\n ns[i][j] = state[i][j]\n return ns\n\n\n# Determining the current state value and determining the win\ndef cur_state(state_space):\n if (state_space[0][0] == state_space[0][1] and state_space[0][1] == state_space[0][2] and state_space[0][\n 0] is not ' '):\n return state_space[0][0], \"Done\"\n if (state_space[1][0] == state_space[1][1] and state_space[1][1] == state_space[1][2] and state_space[1][\n 0] is not ' '):\n return state_space[1][0], \"Done\"\n if (state_space[2][0] == state_space[2][1] and state_space[2][1] == state_space[2][2] and state_space[2][\n 0] is not ' '):\n return state_space[2][0], \"Done\"\n\n if (state_space[0][0] == state_space[1][0] and state_space[1][0] == state_space[2][0] and state_space[0][\n 0] is not ' '):\n return state_space[0][0], \"Done\"\n if (state_space[0][1] == state_space[1][1] and state_space[1][1] == state_space[2][1] and state_space[0][\n 1] is not ' '):\n return state_space[0][1], \"Done\"\n if (state_space[0][2] == state_space[1][2] and state_space[1][2] == state_space[2][2] and state_space[0][\n 2] is not ' '):\n return state_space[0][2], \"Done\"\n\n if (state_space[0][0] == state_space[1][1] and state_space[1][1] == state_space[2][2] and state_space[0][\n 0] is not ' '):\n return state_space[1][1], \"Done\"\n if (state_space[2][0] == state_space[1][1] and state_space[1][1] == state_space[0][2] and state_space[2][\n 0] is not ' '):\n return state_space[1][1], \"Done\"\n # if none of the above is true there must be a draw\n draw = 0\n for i in range(3):\n for j in range(3):\n if state_space[i][j] is ' ':\n draw = 1\n if draw is 0:\n return None, \"Draw\"\n\n return None, \"Not Done\"\n\n\n# Defining the outline of the Tic-Tac Toe for the state_space or environment\ndef outline(state_space):\n print('----------------')\n print('| ' + str(state_space[0][0]) + ' || ' + str(state_space[0][1]) + ' || ' + str(state_space[0][2]) + ' |')\n print('----------------')\n print('| ' + str(state_space[1][0]) + ' || ' + str(state_space[1][1]) + ' || ' + str(state_space[1][2]) + ' |')\n print('----------------')\n print('| ' + str(state_space[2][0]) + ' || ' + str(state_space[2][1]) + ' || ' + str(state_space[2][2]) + ' |')\n print('----------------')\n\n\n# Initializing state values\neach_player = ['X', 'O', ' ']\nstates_dictionary = {}\n# listing all possible states\nstates = [[list(i[0:3]), list(i[3:6]), list(i[6:10])] for i in product(each_player, repeat=9)]\n# getting Total number of states\nTotal_states = len(states)\nprint(\"Total number of states = \", Total_states)\n# Total number of moves/ actions in Tic-Tac-Toe is 9\nTotal_moves = 9\nprint(\"Total number of 
actions = \", Total_moves)\n# Intializing agent intial value as 0\nsv_O = np.full(Total_states, 0.0)\n\n# Defining the state values for agent O\nfor i in range(Total_states):\n states_dictionary[i] = states[i]\n won_by, _ = cur_state(states_dictionary[i])\n if won_by == 'X':\n sv_O[i] = -1\n elif won_by == 'O':\n sv_O[i] = 1\n\n\n# Using Update rule of Temporal difference to update the state value of 'O'\n# V(s) <- V(s) + alpha * ((V(s^f) - V(s))\n# current_state_value <- current_state_value + learning_rate * (new_state_value - current_state_value)\ndef update_O(alpha, csv, nsv):\n # alpha: learning rate, csv: current state value, nsv: next state value\n sv_O[csv] = sv_O[csv] + alpha * sv_O[nsv]\n\n\n# Testing our Tic-Tac-Toe agent 'O' vs. Human\n# Temporal difference: A RL Algo.\ndef TD(sv, each_player):\n actions = []\n curr_state_values = []\n empty_cells = []\n for i in range(3):\n for j in range(3):\n if sv[i][j] is ' ':\n empty_cells.append(i * 3 + (j + 1))\n\n for empty_cell in empty_cells:\n actions.append(empty_cell)\n new_state = new(sv)\n play(new_state, each_player, empty_cell)\n next_sid = list(states_dictionary.keys())[list(states_dictionary.values()).index(new_state)]\n curr_state_values.append(sv_O[next_sid])\n\n print('Possible Action moves = ' + str(actions))\n print('Action Move values = ' + str(curr_state_values))\n best_move_id = np.argmax(curr_state_values)\n best_move = actions[best_move_id]\n return best_move\n\n\n# Now Playing\n# Loading policy or the trained state values\nsv_O = np.loadtxt('trained_O.txt', dtype=np.float64)\n\nplay_more = \"Y\"\nwhile play_more == 'Y' or play_more == 'y':\n state_space = [[' ', ' ', ' '],[' ', ' ', ' '],[' ', ' ', ' ']]\n curr_state = \"Not Done\"\n print(\"\\n Let's start New Game!\")\n outline(state_space)\n input_choice = input(\"Choose which player to go first - X (Human) or O(RL Agent): \")\n won_by = None\n\n if input_choice == 'X' or input_choice == 'x':\n cid = 0\n else:\n cid = 1\n\n while curr_state == \"Not Done\":\n csv = list(states_dictionary.keys())[list(states_dictionary.values()).index(state_space)]\n if cid == 0:\n print(\"Now Human's turn:\")\n cell_select = int(input(\"It's your turn! Choose a block to place X (1 to 9): \"))\n play(state_space, players[cid], cell_select)\n\n else:\n cell_select = TD(state_space,players[cid])\n play(state_space,players[cid], cell_select)\n print(\"Agent O placed at\" + str(cell_select))\n\n outline(state_space)\n won_by, curr_state = cur_state(state_space)\n if won_by is not None:\n print(str(won_by) + \" Won Won Won!\")\n elif curr_state is \"Draw\":\n print(\"Draw Draw Draw!!!\")\n else:\n cid = (cid + 1) % 2\n\n play_more = input('Wanna Play more? Hit Y/N')\nprint('See you again! :D')\n"
] | [
[
"numpy.argmax",
"numpy.loadtxt",
"numpy.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Deech08/modspectra | [
"4af177418f9ac3e1ff30bf99968251ac143a96bc"
] | [
"modspectra/tests/test_spectrum_creation.py"
] | [
"import pytest\nfrom numpy.random import randn\nfrom numpy.random import random\nimport numpy as np\n\ndef test_non_detection():\n from ..cube import EmissionCube\n from astropy.coordinates import SkyCoord\n import astropy.units as u\n '''\n Test that an anti-center pointing returns zero emission\n '''\n l = 180. + randn()*130.\n b = 0. + randn()*20.\n while (l > 340.) | (l < 20.): # Ensure actual non-detection\n l = 180. + randn()*130.\n c = SkyCoord(l = l*u.deg, b = b*u.deg, frame = 'galactic', galcen_distance = 8.127*u.kpc)\n spec = EmissionCube.create_DK19_spectrum(c, 0.5 * u.deg, redden = False)\n assert np.allclose(spec.value, np.zeros_like(spec.value))\n\ndef test_coordinate_error():\n from ..cube import EmissionCube\n import astropy.units as u\n '''\n Ensure that a SkyCoord Object is required\n '''\n l = 0. + randn()*5.\n b = 0. + randn()*3.\n try:\n spec = EmissionCube.create_DK19_spectrum((l,b), 0.5 * u.deg, redden = False)\n except TypeError:\n assert True\n else:\n assert False\n\ndef test_galcen_distance():\n from ..cube import EmissionCube\n import astropy.units as u\n from astropy.coordinates import SkyCoord\n '''\n Ensure that a default galcen_distnace is adopted\n '''\n l = 0. + randn()*5.\n b = 0. + randn()*3.\n c = SkyCoord(l = l*u.deg, b = b*u.deg, frame = 'galactic')\n c2 = SkyCoord(l = l*u.deg, b = b*u.deg, frame = 'galactic', galcen_distance = 8.127*u.kpc)\n spec = EmissionCube.create_DK19_spectrum(c, 0.5 * u.deg, redden = False)\n spec2 = EmissionCube.create_DK19_spectrum(c2, 0.5 * u.deg, redden = False)\n assert np.allclose(spec.value, spec2.value)\n\ndef test_radius_degrees():\n from ..cube import EmissionCube\n import astropy.units as u\n from astropy.coordinates import SkyCoord\n '''\n Ensure that a default units for radius are in\n '''\n l = 0. + randn()*5.\n b = 0. + randn()*3.\n c = SkyCoord(l = l*u.deg, b = b*u.deg, frame = 'galactic', galcen_distance = 8.127*u.kpc)\n r1 = np.abs( randn()*1000.) * u.arcmin\n r2 = r1.to(u.deg).value\n spec = EmissionCube.create_DK19_spectrum(c, r1, redden = False)\n spec2 = EmissionCube.create_DK19_spectrum(c, r2, redden = False)\n assert np.allclose(spec.value, spec2.value)\n\n\n \n \n"
] | [
[
"numpy.random.randn",
"numpy.zeros_like",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |