repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
lefthandedroo/Cosmodels | [
"c355d18021467cf92546cf2fc9cb1d1abe59b8d8"
] | [
"History/Stats/emcee_ex_4_merging.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nchose m, c (parameters) for a straight line\nfrom the line pick N points (N=3, 5, 50, 100, 1000)\npick sigma (size of the noise)\nrandomly deviate(offset) points in y direction by using \nsigma*random number from normal distribution\nsigma the same for all points\nthen define the likelihood use likelihood for dataset \nwith gaussian error\nLookup how to write the eqution for a likelihood\n(and then use log likelihood)\nplug into emcee\ndraw a plot of c vs m displaying the walkers' walk\nproduce marginalised distribution - historgram \nfor every m and c found - plot them together \nplot data and error bars, \nplot what the actual model is\nfind max likelihood\nand m and b corresponding to max L\ndraw the line that they give\ntry for different sigmas\nmodify to find parameters with max posterior distribution\nuse those to plot the best line\nincrease number of dimensions, think of curve that requires 4-5 parameters\n(say polynomial)\ndo a multi dimensional search, need more walkers\nand more steps\n\nlook up first two erros - whether they are to do with python version\n\nTry to UNDERSTAND\n\nNotable results:\n best index is = 3010835\n abest is = 3.9419662932\n bbest is = -3.01946040697\n cbest is = 0.990232737609\n dbest is = 15.0034779775\n ebest is = 1.50005168141\n Mean acceptance fraction: 0.50711475\n Number of steps: 100000\n Number of walkers: 200\n Sampler time: 63min 42s\n Total time: 65min 30s\n\"\"\"\nimport corner\nimport emcee\nimport logging\nimport matplotlib.pyplot as pl\nimport numpy as np\nimport scipy.optimize as op\nimport sys\nimport time\n\n\ntry:\n timet0 = time.time() # starting script timer\n \n \n # Input\n # \"True\" parameters.\n a_true = 0.1\n b_true = -3\n c_true = 0.5\n d_true = 0.1\n e_true = 12\n \n N = 20 # number of datapoints\n sigma = 0.75 # standard deviation\n mu = 0 # mean\n \n ndim, nwalkers = 5, 12\n nsteps = 1000\n burnin = 500\n \n \n # Functions\n def lnlike(theta, x, y, sigma):\n a, b, c, d, e = theta\n model = a * x**4 + b * x**2 + c * x + d + e*np.sin(x)\n inv_sigma2 = 1.0/(sigma**2)\n return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2))) \n \n def lnprior(theta):\n a, b, c, d, e = theta\n if (-5.0 < a < 5 and -5.0 < b < 5.0 and 0.0 < c < 1.0 and 0.0 < d < 20 \n and -3.0 < e < 30):\n return 0.0\n return -np.inf\n \n def lnprob(theta, x, y, sigma):\n lp = lnprior(theta)\n if not np.isfinite(lp):\n return -np.inf\n return lp + lnlike(theta, x, y, sigma) \n \n \n # Generating noisy data from the model y.\n x = np.random.rand(N)*4 # picking random points on x-axis\n yerr = np.random.normal(mu,sigma,N) # Gaussian noise\n y = a_true * x**4 + b_true * x**2 + c_true * x + d_true + e_true*np.sin(x) \n y += yerr # data, offset in y with noise\n \n \n # Finding a \"good\" place to start using alternative method to emcee.\n nll = lambda *args: -lnlike(*args)\n result = op.minimize(nll, [a_true, b_true, c_true, d_true, e_true], \n args=(x, y, yerr))\n a_ml, b_ml, c_ml, d_ml, e_ml = result[\"x\"] \n \n \n # Initializing walkers in a Gaussian ball around the max likelihood. 
\n pos = [result[\"x\"] + 1*np.random.randn(ndim) for i in range(nwalkers)] \n \n \n # Sampler setup\n times0 = time.time() # starting emcee timer\n \n sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y, sigma))\n sampler.run_mcmc(pos, nsteps)\n \n times1=time.time() # stopping emcee timer\n times=times1 - times0 # time to run emcee\n timesmin = round((times / 60),1) # minutes\n timessec = round((times % 60),1) # seconds\n \n \n # Corner plot (walkers' walk + histogram).\n samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))\n fig = corner.corner(samples, labels=[\"$a$\", \"$b$\", \"$c$\", \"$d$\", \"$e$\"], \n truths=[a_true, b_true, c_true, d_true, e_true])\n fig.savefig('nsteps'+str(nsteps)+str(time.strftime(\"%c\"))+\n 'nwalkers'+str(nwalkers)+'.png')\n \n \n # Marginalised distribution (histogram) plot.\n pl.hist(sampler.flatchain[:,0], 100)\n pl.show()\n \n \n # Plotting lines of best fit using a 100-strong sample of parameters.\n xl = np.linspace(0,4,100)\n #for a, b, c, d, e in samples[np.random.randint(len(samples), size=100)]:\n # pl.plot(xl, a * xl**4 + b * xl**2 + c * xl + d +\n # e*np.sin(xl), color=\"k\", alpha=0.1)\n pl.plot(xl, a_true * xl**4 + b_true * xl**2 + c_true * xl + d_true + \n e_true*np.sin(xl),color=\"r\", lw=2, alpha=0.8)\n pl.errorbar(x, y, yerr=yerr, fmt=\".k\")\n pl.show()\n \n \n # Best line of fit found by emcee.\n bi = np.argmax(sampler.lnprobability) # index with highest post prob \n abest = sampler.flatchain[bi,0] # parameters with the highest \n bbest = sampler.flatchain[bi,1] # posterior probability\n cbest = sampler.flatchain[bi,2]\n dbest = sampler.flatchain[bi,3]\n ebest = sampler.flatchain[bi,4]\n \n \n # plot of data with errorbars + model\n pl.errorbar(x, y, yerr=sigma, fmt='o', alpha=0.3)\n xt = np.linspace(0,4,100)\n yt = (a_true * xt**4 + b_true * xt**2 + c_true * xt + d_true \n + e_true * np.sin(xt))\n model, = pl.plot(xt,yt,lw='3', c='g')\n ybest = (abest * xt**4 + bbest * xt**2 + cbest * xt + dbest \n + ebest * np.sin(xt))\n best_fit, = pl.plot(xt,ybest,lw='3', c='r')\n pl.legend([model, best_fit], ['Model', 'Best Fit'])\n pl.show\n \n \n timet1=time.time() # stopping script time\n timet=timet1-timet0 # total time to run script\n timetmin = round((timet / 60),1) # minutes\n timetsec = round((timet % 60),1) # seconds\n \n \n # Results getting printed:\n print('best index is =',str(bi))\n print('abest is =',str(abest))\n print('bbest is =',str(bbest))\n print('cbest is =',str(cbest))\n print('dbest is =',str(dbest))\n print('ebest is =',str(ebest))\n # Mean acceptance fraction. In general, acceptance fraction has an entry \n # for each walker so, in this case, it is a 50-dimensional vector.\n print('Mean acceptance fraction:', np.mean(sampler.acceptance_fraction))\n print('Number of steps:', str(nsteps))\n print('Number of walkers:', str(nwalkers))\n print('Sampler time:',str(int(timesmin))+'min'\n ,str(int(timessec))+'s')\n print('Total time: ',str(int(timetmin))+'min'\n ,str(int(timetsec))+'s')\n \n \nexcept Exception as e:\n logging.error('Caught exception:',str(e))\n print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))\n\n\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.sin",
"scipy.optimize.minimize",
"numpy.random.randn",
"numpy.argmax",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.show",
"numpy.isfinite",
"numpy.random.rand",
"numpy.log",
"matplotlib.pyplot.hist",
"numpy.random.normal",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.mean"
]
] |
Pascal-Bliem/tox-block | [
"fed3d54553a0911d190e421feafddb11969878cd"
] | [
"tox_block/model/lstm_multi_label.py"
] | [
"\"\"\"A bidirectional LSTM model with multi labels (6 types of toxicity)\"\"\"\n\n# general data handling and computation\nimport pandas as pd\nimport numpy as np\n# TensorFlow / Keras\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense, Embedding, Input\nfrom tensorflow.keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom tensorflow.keras.wrappers.scikit_learn import KerasClassifier\n# modules for this package\nfrom tox_block.config import config\n\n\ndef get_model(embedding_matrix: np.ndarray = None,\n embedding_size: int = config.EMBEDDING_SIZE,\n max_sequence_length: int = config.MAX_SEQUENCE_LENGTH,\n max_features: int = config.MAX_FEATURES,\n dropout: float = config.DROPOUT,\n num_lstm_units: int = config.NUM_LSTM_UNITS,\n num_dense_units: int = config.NUM_DENSE_UNITS,\n learning_rate: float = config.LEARNING_RATE):\n \"\"\"Returns a bidirectional LSTM model\"\"\"\n \n inp = Input(shape=(max_sequence_length, ))\n if not embedding_matrix is None:\n x = Embedding(max_features, \n embedding_size, \n weights=[embedding_matrix])(inp)\n else:\n x = Embedding(max_features, \n embedding_size)(inp)\n x = Bidirectional(LSTM(num_lstm_units, \n return_sequences=True, \n dropout=dropout, \n recurrent_dropout=dropout))(x)\n x = GlobalMaxPool1D()(x)\n x = Dense(num_dense_units, activation=\"relu\")(x)\n x = Dropout(rate=dropout)(x)\n x = Dense(6, activation=\"sigmoid\")(x)\n model = Model(inputs=inp, outputs=x)\n model.compile(Adam(lr=learning_rate),\n loss=\"binary_crossentropy\",\n metrics=[\"accuracy\"])\n\n return model\n\n# callbacks for training\ncheckpoint = ModelCheckpoint(config.TRAINED_MODEL_DIR + \"/checkpoint.h5\", \n monitor=\"val_loss\", \n verbose=1, \n save_best_only=True, \n mode=\"min\")\n\nearly_stop = EarlyStopping(monitor=\"val_loss\", \n mode=\"min\", \n patience=2,\n restore_best_weights=True)\n\nreduce_lr = ReduceLROnPlateau(monitor=\"val_loss\",\n factor=0.5,\n patience=1,\n verbose=1,\n mode=\"min\",\n min_lr=0.00001)\n\ncallbacks_list = [checkpoint, early_stop, reduce_lr]\n\nif __name__ == '__main__':\n model = get_model()\n model.summary()"
] | [
[
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.GlobalMaxPool1D",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.keras.layers.Input"
]
] |
frgfm/torch-zoo | [
"c97beacf3d49eaa34398abf47f378ea6b48a70f3"
] | [
"holocron/nn/modules/conv.py"
] | [
"# Copyright (C) 2019-2022, François-Guillaume Fernandez.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport math\nfrom typing import Any, List, Optional\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.nn.functional import pad\nfrom torch.nn.modules.conv import _ConvNd\nfrom torch.nn.modules.utils import _pair\n\nfrom .. import functional as F\n\n__all__ = [\"NormConv2d\", \"Add2d\", \"SlimConv2d\", \"PyConv2d\", \"Involution2d\"]\n\n\nclass _NormConvNd(_ConvNd):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n stride: int,\n padding: int,\n dilation: int,\n transposed: bool,\n output_padding: int,\n groups: int,\n bias: bool,\n padding_mode: str,\n normalize_slices=False,\n eps=1e-14,\n ) -> None:\n super().__init__(\n in_channels,\n out_channels,\n kernel_size, # type: ignore[arg-type]\n stride, # type: ignore[arg-type]\n padding, # type: ignore[arg-type]\n dilation, # type: ignore[arg-type]\n transposed,\n output_padding, # type: ignore[arg-type]\n groups,\n bias,\n padding_mode,\n )\n self.normalize_slices = normalize_slices\n self.eps = eps\n\n\nclass NormConv2d(_NormConvNd):\n r\"\"\"Implements the normalized convolution module from `\"Normalized Convolutional Neural Network\"\n <https://arxiv.org/pdf/2005.05274v2.pdf>`_.\n\n In the simplest case, the output value of the layer with input size\n :math:`(N, C_{in}, H, W)` and output :math:`(N, C_{out}, H_{out}, W_{out})`\n can be precisely described as:\n\n .. math::\n out(N_i, C_{out_j}) = bias(C_{out_j}) +\n \\sum_{k = 0}^{C_{in} - 1} weight(C_{out_j}, k) \\star\n \\frac{input(N_i, k) - \\mu(N_i, k)}{\\sqrt{\\sigma^2(N_i, k) + \\epsilon}}\n\n where :math:`\\star` is the valid 2D cross-correlation operator,\n :math:`\\mu(N_i, k)` and :math:`\\sigma²(N_i, k)` are the mean and variance of :math:`input(N_i, k)` over all slices,\n :math:`N` is a batch size, :math:`C` denotes a number of channels,\n :math:`H` is a height of input planes in pixels, and :math:`W` is\n width in pixels.\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel\n elements. Default: 1\n groups (int, optional): Number of blocked connections from input\n channels to output channels. Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the\n output. Default: ``True``\n padding_mode (string, optional): ``'zeros'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. 
Default: ``'zeros'``\n eps (float, optional): a value added to the denominator for numerical stability.\n Default: 1e-14\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n stride: int = 1,\n padding: int = 0,\n dilation: int = 1,\n groups: int = 1,\n bias: bool = True,\n padding_mode: str = \"zeros\",\n eps: float = 1e-14,\n ) -> None:\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super().__init__(\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n dilation,\n False,\n _pair(0),\n groups,\n bias,\n padding_mode,\n False,\n eps,\n )\n\n def forward(self, x: Tensor) -> Tensor:\n if self.padding_mode != \"zeros\":\n return F.norm_conv2d(\n pad(x, self._reversed_padding_repeated_twice, mode=self.padding_mode),\n self.weight,\n self.bias,\n self.stride, # type: ignore[arg-type]\n _pair(0),\n self.dilation, # type: ignore[arg-type]\n self.groups,\n self.eps,\n )\n return F.norm_conv2d(\n x,\n self.weight,\n self.bias,\n self.stride, # type: ignore[arg-type]\n self.padding, # type: ignore[arg-type]\n self.dilation, # type: ignore[arg-type]\n self.groups,\n self.eps,\n )\n\n\nclass Add2d(_NormConvNd):\n r\"\"\"Implements the adder module from `\"AdderNet: Do We Really Need Multiplications in Deep Learning?\"\n <https://arxiv.org/pdf/1912.13200.pdf>`_.\n\n In the simplest case, the output value of the layer at position :math:`(m, n)` in channel :math:`c`\n with filter F of spatial size :math:`(d, d)`, intput size :math:`(C_{in}, H, W)` and output :math:`(C_{out}, H, W)`\n can be precisely described as:\n\n .. math::\n out(m, n, c) = - \\sum\\limits_{i=0}^d \\sum\\limits_{j=0}^d \\sum\\limits_{k=0}^{C_{in}}\n |X(m + i, n + j, k) - F(i, j, k, c)|\n\n where :math:`C` denotes a number of channels,\n :math:`H` is a height of input planes in pixels, and :math:`W` is\n width in pixels.\n\n .. image:: https://github.com/frgfm/Holocron/releases/download/v0.1.3/add2d.png\n :align: center\n :alt: Add2D schema\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel\n elements. Default: 1\n groups (int, optional): Number of blocked connections from input\n channels to output channels. Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the\n output. Default: ``True``\n padding_mode (string, optional): ``'zeros'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. 
Default: ``'zeros'``\n normalize_slices (bool, optional): whether slices should be normalized before performing cross-correlation.\n Default: False\n eps (float, optional): a value added to the denominator for numerical stability.\n Default: 1e-14\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n stride: int = 1,\n padding: int = 0,\n dilation: int = 1,\n groups: int = 1,\n bias: bool = True,\n padding_mode: str = \"zeros\",\n normalize_slices: bool = False,\n eps: float = 1e-14,\n ) -> None:\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super().__init__(\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n dilation,\n False,\n _pair(0),\n groups,\n bias,\n padding_mode,\n normalize_slices,\n eps,\n )\n\n def forward(self, x: Tensor) -> Tensor:\n if self.padding_mode != \"zeros\":\n return F.add2d(\n pad(x, self._reversed_padding_repeated_twice, mode=self.padding_mode),\n self.weight,\n self.bias,\n self.stride, # type: ignore[arg-type]\n _pair(0),\n self.dilation, # type: ignore[arg-type]\n self.groups,\n self.normalize_slices,\n self.eps,\n )\n return F.add2d(\n x,\n self.weight,\n self.bias,\n self.stride, # type: ignore[arg-type]\n self.padding, # type: ignore[arg-type]\n self.dilation, # type: ignore[arg-type]\n self.groups,\n self.normalize_slices,\n self.eps,\n )\n\n\nclass SlimConv2d(nn.Module):\n r\"\"\"Implements the convolution module from `\"SlimConv: Reducing Channel Redundancy in Convolutional Neural Networks\n by Weights Flipping\" <https://arxiv.org/pdf/2003.07469.pdf>`_.\n\n First, we compute channel-wise weights as follows:\n\n .. math::\n z(c) = \\frac{1}{H \\cdot W} \\sum\\limits_{i=1}^H \\sum\\limits_{j=1}^W X_{c,i,j}\n\n where :math:`X \\in \\mathbb{R}^{C \\times H \\times W}` is the input tensor,\n :math:`H` is height in pixels, and :math:`W` is\n width in pixels.\n\n .. math::\n w = \\sigma(F_{fc2}(\\delta(F_{fc1}(z))))\n\n where :math:`z \\in \\mathbb{R}^{C}` contains channel-wise statistics,\n :math:`\\sigma` refers to the sigmoid function,\n :math:`\\delta` refers to the ReLU function,\n :math:`F_{fc1}` is a convolution operation with kernel of size :math:`(1, 1)`\n with :math:`max(C/r, L)` output channels followed by batch normalization,\n and :math:`F_{fc2}` is a plain convolution operation with kernel of size :math:`(1, 1)`\n with :math:`C` output channels.\n\n We then proceed with reconstructing and transforming both pathways:\n\n .. math::\n X_{top} = X \\odot w\n\n .. math::\n X_{bot} = X \\odot \\check{w}\n\n where :math:`\\odot` refers to the element-wise multiplication and :math:`\\check{w}` is\n the channel-wise reverse-flip of :math:`w`.\n\n .. math::\n T_{top} = F_{top}(X_{top}^{(1)} + X_{top}^{(2)})\n\n .. math::\n T_{bot} = F_{bot}(X_{bot}^{(1)} + X_{bot}^{(2)})\n\n where :math:`X^{(1)}` and :math:`X^{(2)}` are the channel-wise first and second halves of :math:`X`,\n :math:`F_{top}` is a convolution of kernel size :math:`(3, 3)`,\n and :math:`F_{bot}` is a convolution of kernel size :math:`(1, 1)` reducing channels by half,\n followed by a convolution of kernel size :math:`(3, 3)`.\n\n Finally we fuse both pathways to yield the output:\n\n .. math::\n Y = T_{top} \\oplus T_{bot}\n\n where :math:`\\oplus` is the channel-wise concatenation.\n\n .. 
image:: https://github.com/frgfm/Holocron/releases/download/v0.1.3/slimconv2d.png\n :align: center\n :alt: SlimConv2D schema\n\n\n Args:\n in_channels (int): Number of channels in the input image\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel\n elements. Default: 1\n groups (int, optional): Number of blocked connections from input\n channels to output channels. Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the\n output. Default: ``True``\n padding_mode (string, optional): ``'zeros'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. Default: ``'zeros'``\n r (int, optional): squeezing divider. Default: 32\n L (int, optional): minimum squeezed channels. Default: 8\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n kernel_size: int,\n stride: int = 1,\n padding: int = 0,\n dilation: int = 1,\n groups: int = 1,\n bias: bool = True,\n padding_mode: str = \"zeros\",\n r: int = 32,\n L: int = 2,\n ) -> None:\n super().__init__()\n self.fc1 = nn.Conv2d(in_channels, max(in_channels // r, L), 1)\n self.bn = nn.BatchNorm2d(max(in_channels // r, L))\n self.fc2 = nn.Conv2d(max(in_channels // r, L), in_channels, 1)\n self.conv_top = nn.Conv2d(\n in_channels // 2, in_channels // 2, kernel_size, stride, padding, dilation, groups, bias, padding_mode\n )\n self.conv_bot1 = nn.Conv2d(in_channels // 2, in_channels // 4, 1)\n self.conv_bot2 = nn.Conv2d(\n in_channels // 4, in_channels // 4, kernel_size, stride, padding, dilation, groups, bias, padding_mode\n )\n\n def forward(self, x: Tensor) -> Tensor:\n # Channel-wise weights\n z = x.mean(dim=(2, 3), keepdim=True)\n z = self.bn(self.fc1(z))\n z = self.fc2(torch.relu(z))\n w = torch.sigmoid(z)\n\n # Compression\n X_w = x * w\n X_top = X_w[:, : x.shape[1] // 2] + X_w[:, x.shape[1] // 2 :]\n X_w = x * w.flip(dims=(1,))\n X_bot = X_w[:, : x.shape[1] // 2] + X_w[:, x.shape[1] // 2 :]\n\n # Transform\n X_top = self.conv_top(X_top)\n X_bot = self.conv_bot2(self.conv_bot1(X_bot))\n\n # Fuse\n return torch.cat((X_top, X_bot), dim=1)\n\n\nclass PyConv2d(nn.ModuleList):\n \"\"\"Implements the convolution module from `\"Pyramidal Convolution: Rethinking Convolutional Neural Networks for\n Visual Recognition\" <https://arxiv.org/pdf/2006.11538.pdf>`_.\n\n .. image:: https://github.com/frgfm/Holocron/releases/download/v0.1.3/pyconv2d.png\n :align: center\n :alt: PyConv2D schema\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int): Size of the convolving kernel\n num_levels (int, optional): number of stacks in the pyramid\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n groups (list(int), optional): Number of blocked connections from input\n channels to output channels. 
Default: 1\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n num_levels: int = 2,\n padding: int = 0,\n groups: Optional[List[int]] = None,\n **kwargs: Any,\n ) -> None:\n\n if num_levels == 1:\n super().__init__(\n [\n nn.Conv2d(\n in_channels,\n out_channels,\n kernel_size,\n padding=padding,\n groups=groups[0] if isinstance(groups, list) else 1,\n **kwargs,\n )\n ]\n )\n else:\n exp2 = int(math.log2(num_levels))\n remainder = num_levels - 2**exp2\n out_chans = [out_channels // 2 ** (exp2 + 1)] * (2 * remainder) + [out_channels // 2**exp2] * (\n num_levels - 2 * remainder\n )\n\n k_sizes = [kernel_size + 2 * idx for idx in range(num_levels)]\n if groups is None:\n groups = [1] + [\n min(2 ** (2 + idx), out_chan) for idx, out_chan in zip(range(num_levels - 1), out_chans[1:])\n ]\n elif not isinstance(groups, list) or len(groups) != num_levels:\n raise ValueError(\"The argument `groups` is expected to be a list of integers of size `num_levels`.\")\n paddings = [padding + idx for idx in range(num_levels)]\n\n super().__init__(\n [\n nn.Conv2d(in_channels, out_chan, k_size, padding=padding, groups=group, **kwargs)\n for out_chan, k_size, padding, group in zip(out_chans, k_sizes, paddings, groups)\n ]\n )\n self.num_levels = num_levels\n\n def forward(self, x):\n\n if self.num_levels == 1:\n return self[0].forward(x)\n return torch.cat([conv(x) for conv in self], dim=1)\n\n\nclass Involution2d(nn.Module):\n \"\"\"Implements the involution module from `\"Involution: Inverting the Inherence of Convolution for Visual\n Recognition\" <https://arxiv.org/pdf/2103.06255.pdf>`_, adapted from the proposed PyTorch implementation in\n the paper.\n\n .. image:: https://github.com/frgfm/Holocron/releases/download/v0.1.3/involutions.png\n :align: center\n :alt: Involution2d schema\n\n Args:\n in_channels (int): Number of channels in the input image\n kernel_size (int): Size of the convolving kernel\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n stride: Stride of the convolution. Default: 1\n groups: Number of blocked connections from input channels to output channels. Default: 1\n dilation: Spacing between kernel elements. 
Default: 1\n reduction_ratio: reduction ratio of the channels to generate the kernel\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n kernel_size: int,\n padding: int = 0,\n stride: int = 1,\n groups: int = 1,\n dilation: int = 1,\n reduction_ratio: float = 1,\n ) -> None:\n\n super().__init__()\n\n self.groups = groups\n self.k_size = kernel_size\n\n self.pool = nn.AvgPool2d(stride, stride) if stride > 1 else None\n self.reduce = nn.Conv2d(in_channels, int(in_channels // reduction_ratio), 1)\n self.span = nn.Conv2d(int(in_channels // reduction_ratio), kernel_size**2 * groups, 1)\n self.unfold = nn.Unfold(kernel_size, dilation, padding, stride)\n\n def forward(self, x):\n\n # Kernel generation\n # (N, C, H, W) --> (N, C, H // s, W // s)\n kernel = self.pool(x) if isinstance(self.pool, nn.Module) else x\n # --> (N, C // r, H // s, W // s)\n kernel = self.reduce(kernel)\n # --> (N, K * K * G, H // s, W // s)\n kernel = self.span(kernel)\n # --> (N, G, 1, K ** 2, H // s, W // s)\n kernel = kernel.view(x.shape[0], self.groups, 1, self.k_size**2, *kernel.shape[-2:])\n\n # --> (N, C * K ** 2, H * W // s ** 2)\n x_unfolded = self.unfold(x)\n # --> (N, G, C // G, K ** 2, H // s, W // s)\n x_unfolded = x_unfolded.reshape(x.shape[0], self.groups, x.shape[1] // self.groups, -1, *kernel.shape[-2:])\n\n # Multiply-Add operation\n # --> (N, C, H // s, W // s)\n out = (kernel * x_unfolded).sum(dim=3).view(*x.shape[:2], *kernel.shape[-2:])\n\n return out\n"
] | [
[
"torch.nn.functional.pad",
"torch.relu",
"torch.nn.Conv2d",
"torch.nn.AvgPool2d",
"torch.sigmoid",
"torch.nn.Unfold",
"torch.cat",
"torch.nn.modules.utils._pair"
]
] |
chnsh/deep-semantic-code-search | [
"57cf12b90b5ec3a49bd6c04cf2b68888162558b3"
] | [
"code_summarization_transfer_learning/fastai/courses/dl1/fastai/imports.py"
] | [
"from IPython.lib.deepreload import reload as dreload\nimport PIL, os, numpy as np, threading, json, bcolz, scipy\nimport pandas as pd, pickle, string, sys, re, time, shutil, copy\nimport seaborn as sns, matplotlib\nfrom abc import abstractmethod\nfrom functools import partial\nfrom pandas_summary import DataFrameSummary\nfrom IPython.lib.display import FileLink\nfrom sklearn import metrics, ensemble, preprocessing\nfrom operator import itemgetter, attrgetter\n\nfrom matplotlib import pyplot as plt, rcParams, animation\n\nmatplotlib.rc('animation', html='html5')\nnp.set_printoptions(precision=5, linewidth=110, suppress=True)\n\nfrom ipykernel.kernelapp import IPKernelApp\n\n\ndef in_notebook(): return IPKernelApp.initialized()\n\n\ndef in_ipynb():\n try:\n cls = get_ipython().__class__.__name__\n return cls == 'ZMQInteractiveShell'\n except NameError:\n return False\n\n\nimport tqdm as tq\n\n\ndef clear_tqdm():\n inst = getattr(tq.tqdm, '_instances', None)\n if not inst: return\n try:\n for i in range(len(inst)): inst.pop().close()\n except Exception:\n pass\n\n\nif in_notebook():\n def tqdm(*args, **kwargs):\n clear_tqdm()\n return tq.tqdm(*args, file=sys.stdout, **kwargs)\n\n\n def trange(*args, **kwargs):\n clear_tqdm()\n return tq.trange(*args, file=sys.stdout, **kwargs)\nelse:\n from tqdm import tqdm, trange\n\n tnrange = trange\n tqdm_notebook = tqdm\n"
] | [
[
"numpy.set_printoptions",
"matplotlib.rc"
]
] |
SCUT-AILab/BPAI-Net | [
"d71c92366222c9e226e15f8263fc2d72361735c3"
] | [
"ops/models.py"
] | [
"# Code for \"TSM: Temporal Shift Module for Efficient Video Understanding\"\n# arXiv:1811.08383\n# Ji Lin*, Chuang Gan, Song Han\n# {jilin, songhan}@mit.edu, [email protected]\n\nfrom torch import nn\nfrom ops.basic_ops import ConsensusModule\nfrom ops.transforms import *\nfrom torch.nn.init import normal_, constant_\nfrom archs.fusion_model import fusion\nclass TSN(nn.Module):\n def __init__(self, num_class, num_segments, modality,patch_size,\n base_model='resnet101', new_length=None,\n consensus_type='avg', before_softmax=True,\n dropout=0.8, img_feature_dim=256,\n crop_num=1, partial_bn=True, print_spec=True, pretrain='imagenet',\n is_shift=False, shift_div=8, shift_place='blockres', fc_lr5=False,\n temporal_pool=False, non_local=False,first=None,second=None,gcn_stride=1,base_lr=0.001,concat_layer=5,\n xyc=False,bn=False,arch_cnn='mobilenetv2',gcn_dropout=0.5):\n super(TSN, self).__init__()\n self.num_class=num_class\n self.modality = modality\n self.num_segments = num_segments\n self.reshape = True\n self.print_spec = print_spec\n self.before_softmax = before_softmax\n self.dropout = dropout\n self.crop_num = crop_num\n self.consensus_type = consensus_type\n self.img_feature_dim = img_feature_dim # the dimension of the CNN feature to represent each frame\n self.pretrain = pretrain\n self.first = first\n self.second = second\n self.bn = bn\n self.gcn_stride = gcn_stride\n self.concat_layer=concat_layer\n self.xyc = xyc\n self.arch_cnn = arch_cnn\n self.patch_size=patch_size\n self.gcn_dropout = gcn_dropout\n\n self.is_shift = is_shift\n self.shift_div = shift_div\n self.shift_place = shift_place\n self.base_model_name = base_model\n self.fc_lr5 = fc_lr5\n self.temporal_pool = temporal_pool\n self.non_local = non_local\n self.base_lr = base_lr\n if not before_softmax and consensus_type != 'avg':\n raise ValueError(\"Only avg consensus can be used after Softmax\")\n\n if new_length is None:\n self.new_length = 1 if modality == \"RGB\" else 5\n else:\n self.new_length = new_length\n if print_spec:\n print((\"\"\"\n Initializing TSN with base model: {}.\n TSN Configurations:\n input_modality: {}\n num_segments: {}\n new_length: {}\n consensus_module: {}\n dropout_ratio: {}\n img_feature_dim: {}\n \"\"\".format(base_model, self.modality, self.num_segments, self.new_length, consensus_type, self.dropout, self.img_feature_dim)))\n\n self._prepare_base_model(base_model)\n\n feature_dim = self._prepare_tsn(num_class)\n\n if self.modality == 'Flow':\n print(\"Converting the ImageNet model to a flow init model\")\n self.base_model = self._construct_flow_model(self.base_model)\n print(\"Done. Flow model ready...\")\n elif self.modality == 'RGBDiff':\n print(\"Converting the ImageNet model to RGB+Diff init model\")\n self.base_model = self._construct_diff_model(self.base_model)\n print(\"Done. 
RGBDiff model ready.\")\n\n self.consensus = ConsensusModule(consensus_type)\n\n if not self.before_softmax:\n self.softmax = nn.Softmax()\n\n self._enable_pbn = partial_bn\n if partial_bn:\n self.partialBN(True)\n\n def _prepare_tsn(self, num_class):\n feature_dim = getattr(self.base_model, self.base_model.last_layer_name).in_features\n if self.dropout == 0:\n setattr(self.base_model, self.base_model.last_layer_name, nn.Linear(feature_dim, num_class))\n self.new_fc = None\n else:\n setattr(self.base_model, self.base_model.last_layer_name, nn.Dropout(p=self.dropout))\n self.new_fc = nn.Linear(feature_dim, num_class)\n\n std = 0.001\n if self.new_fc is None:\n normal_(getattr(self.base_model, self.base_model.last_layer_name).weight, 0, std)\n constant_(getattr(self.base_model, self.base_model.last_layer_name).bias, 0)\n else:\n if hasattr(self.new_fc, 'weight'):\n normal_(self.new_fc.weight, 0, std)\n constant_(self.new_fc.bias, 0)\n return feature_dim\n\n def _prepare_base_model(self, base_model):\n print('=> base model: {}'.format(base_model))\n if base_model == 'fusion':\n self.base_model = fusion(self.num_class, True if self.pretrain == 'imagenet' else False, self.first,\n self.second,stride=self.gcn_stride, patch_size=self.patch_size,\n concat_layer=self.concat_layer, xyc=self.xyc, bn=self.bn,\n arch_cnn=self.arch_cnn,dropout=self.gcn_dropout)\n\n if 'resnet' in base_model or 'resnet' in self.arch_cnn:\n\n if 'resnet' in base_model:\n self.base_model = getattr(torchvision.models, base_model)(True if self.pretrain == 'imagenet' else False)\n if self.is_shift:\n print('Adding temporal shift...')\n from ops.temporal_shift import make_temporal_shift\n make_temporal_shift(self.base_model, self.num_segments,\n n_div=self.shift_div, place=self.shift_place, temporal_pool=self.temporal_pool)\n\n if self.non_local:\n print('Adding non-local module...')\n from ops.non_local import make_non_local\n make_non_local(self.base_model, self.num_segments)\n\n self.base_model.last_layer_name = 'fc'\n self.input_size = 224\n self.input_mean = [0.485, 0.456, 0.406]\n self.input_std = [0.229, 0.224, 0.225]\n\n self.base_model.avgpool = nn.AdaptiveAvgPool2d(1)\n\n if self.modality == 'Flow':\n self.input_mean = [0.5]\n self.input_std = [np.mean(self.input_std)]\n elif self.modality == 'RGBDiff':\n self.input_mean = [0.485, 0.456, 0.406] + [0] * 3 * self.new_length\n self.input_std = self.input_std + [np.mean(self.input_std) * 2] * 3 * self.new_length\n\n elif base_model == 'mobilenetv2' or self.arch_cnn == 'mobilenetv2':\n from archs.mobilenet_v2 import mobilenet_v2, InvertedResidual\n #from archs.online.mobilenet_v2_online import mobilenet_v2\n fc_name = 'fc'\n if base_model == 'mobilenetv2':\n self.base_model = mobilenet_v2(True if self.pretrain == 'imagenet' else False)\n fc_name = 'classifier'\n self.base_model.last_layer_name = fc_name\n self.input_size = 224\n self.input_mean = [0.485, 0.456, 0.406]\n self.input_std = [0.229, 0.224, 0.225]\n\n self.base_model.avgpool = nn.AdaptiveAvgPool2d(1)\n if self.is_shift:\n from ops.temporal_shift import TemporalShift\n if self.arch_cnn == 'mobilenetv2':\n modules = self.base_model.cnn.modules()\n else:\n modules = self.base_model.modules()\n for m in modules:\n if isinstance(m, InvertedResidual) and len(m.conv) == 8 and m.use_res_connect:\n if self.print_spec:\n print('Adding temporal shift... 
{}'.format(m.use_res_connect))\n m.conv[0] = TemporalShift(m.conv[0], n_segment=self.num_segments, n_div=self.shift_div)\n if self.modality == 'Flow':\n self.input_mean = [0.5]\n self.input_std = [np.mean(self.input_std)]\n elif self.modality == 'RGBDiff':\n self.input_mean = [0.485, 0.456, 0.406] + [0] * 3 * self.new_length\n self.input_std = self.input_std + [np.mean(self.input_std) * 2] * 3 * self.new_length\n else:\n raise ValueError('Unknown base model: {}'.format(base_model))\n\n def train(self, mode=True):\n \"\"\"\n Override the default train() to freeze the BN parameters\n :return:\n \"\"\"\n super(TSN, self).train(mode)\n count = 0\n if self._enable_pbn and mode:\n print(\"Freezing BatchNorm2D except the first one.\")\n for m in self.base_model.modules():\n if isinstance(m, nn.BatchNorm2d):\n count += 1\n if count >= (2 if self._enable_pbn else 1):\n m.eval()\n # shutdown update in frozen mode\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n\n def partialBN(self, enable):\n self._enable_pbn = enable\n\n def get_optim_policies(self):\n first_conv_weight = []\n first_conv_bias = []\n normal_weight = []\n normal_bias = []\n lr5_weight = []\n lr10_bias = []\n bn = []\n custom_ops = []\n gcn = []\n conv_cnt = 0\n bn_cnt = 0\n for m in self.modules():\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv3d):\n ps = list(m.parameters())\n conv_cnt += 1\n if conv_cnt == 1:\n first_conv_weight.append(ps[0])\n if len(ps) == 2:\n first_conv_bias.append(ps[1])\n else:\n normal_weight.append(ps[0])\n if len(ps) == 2:\n normal_bias.append(ps[1])\n elif isinstance(m, torch.nn.Linear):\n ps = list(m.parameters())\n if self.fc_lr5:\n lr5_weight.append(ps[0])\n else:\n normal_weight.append(ps[0])\n if len(ps) == 2:\n if self.fc_lr5:\n lr10_bias.append(ps[1])\n else:\n normal_bias.append(ps[1])\n\n elif isinstance(m, torch.nn.BatchNorm2d):\n bn_cnt += 1\n # later BN's are frozen\n if not self._enable_pbn or bn_cnt == 1:\n bn.extend(list(m.parameters()))\n elif isinstance(m, torch.nn.BatchNorm3d):\n bn_cnt += 1\n # later BN's are frozen\n if not self._enable_pbn or bn_cnt == 1:\n bn.extend(list(m.parameters()))\n elif isinstance(m, torch.nn.BatchNorm1d):\n bn_cnt += 1\n # later BN's are frozen\n if not self._enable_pbn or bn_cnt == 1:\n bn.extend(list(m.parameters()))\n elif len(m._modules) == 0:\n if len(list(m.parameters())) > 0:\n raise ValueError(\"New atomic module type: {}. 
Need to give it a learning policy\".format(type(m)))\n\n return [\n {'params': first_conv_weight, 'lr_mult': 5 if self.modality == 'Flow' else 1, 'decay_mult': 1,\n 'name': \"first_conv_weight\"},\n {'params': first_conv_bias, 'lr_mult': 10 if self.modality == 'Flow' else 2, 'decay_mult': 0,\n 'name': \"first_conv_bias\"},\n {'params': normal_weight, 'lr_mult': 1, 'decay_mult': 1,\n 'name': \"normal_weight\"},\n {'params': normal_bias, 'lr_mult': 2, 'decay_mult': 0,\n 'name': \"normal_bias\"},\n {'params': bn, 'lr_mult': 1, 'decay_mult': 0,\n 'name': \"BN scale/shift\"},\n {'params': custom_ops, 'lr_mult': 1, 'decay_mult': 1,\n 'name': \"custom_ops\"},\n # for fc\n {'params': lr5_weight, 'lr_mult': 5, 'decay_mult': 1,\n 'name': \"lr5_weight\"},\n {'params': lr10_bias, 'lr_mult': 10, 'decay_mult': 0,\n 'name': \"lr10_bias\"},\n ]\n\n def forward(self,input,ske=None,boxes=None,no_reshape=False):# input torch.Size([8, 24, 224, 224])\n if not no_reshape:\n sample_len = (3 if self.modality == \"RGB\" else 2) * self.new_length\n\n if self.modality == 'RGBDiff':\n sample_len = 3 * self.new_length\n input = self._get_diff(input)\n # reshape size(BT,C,H,W)\n #todo\n if self.base_model_name == 'fusion':\n ske_result,base_out= self.base_model(input.view((-1, sample_len) + input.size()[-2:]),ske,boxes) #torch.Size([16, 2048])\n else:\n base_out = self.base_model(input.view((-1, sample_len) + input.size()[-2:]))\n else:\n if 'resnet' in self.base_model_name:\n base_out = self.base_model(input)\n elif self.base_model_name == 'fusion':\n ske_result,base_out= self.base_model(input,ske,boxes) #torch.Size([16, 2048])\n else:\n raise NotImplementedError('only support resnet and fusion model')\n\n\n if self.dropout > 0:\n base_out = self.new_fc(base_out) #torch.Size([64, 34])(BT,num_classes)\n\n if not self.before_softmax:\n base_out = self.softmax(base_out)\n\n\n if self.is_shift and self.temporal_pool:\n base_out = base_out.view((-1, self.num_segments // 2) + base_out.size()[1:])\n else:\n base_out = base_out.view((-1, self.num_segments) + base_out.size()[1:]) # (8,8,34)(B,T,num_classes)\n output = self.consensus(base_out).squeeze(1) # (8,1,34)\n\n if self.base_model_name == 'fusion':\n return (output,ske_result)\n else:\n return output\n\n\n def _get_diff(self, input, keep_rgb=False):\n input_c = 3 if self.modality in [\"RGB\", \"RGBDiff\"] else 2\n input_view = input.view((-1, self.num_segments, self.new_length + 1, input_c,) + input.size()[2:])\n if keep_rgb:\n new_data = input_view.clone()\n else:\n new_data = input_view[:, :, 1:, :, :, :].clone()\n\n for x in reversed(list(range(1, self.new_length + 1))):\n if keep_rgb:\n new_data[:, :, x, :, :, :] = input_view[:, :, x, :, :, :] - input_view[:, :, x - 1, :, :, :]\n else:\n new_data[:, :, x - 1, :, :, :] = input_view[:, :, x, :, :, :] - input_view[:, :, x - 1, :, :, :]\n\n return new_data\n\n def _construct_flow_model(self, base_model):\n # modify the convolution layers\n # Torch models are usually defined in a hierarchical way.\n # nn.modules.children() return all sub modules in a DFS manner\n modules = list(self.base_model.modules())\n first_conv_idx = list(filter(lambda x: isinstance(modules[x], nn.Conv2d), list(range(len(modules)))))[0]\n conv_layer = modules[first_conv_idx]\n container = modules[first_conv_idx - 1]\n\n # modify parameters, assume the first blob contains the convolution kernels\n params = [x.clone() for x in conv_layer.parameters()]\n kernel_size = params[0].size()\n new_kernel_size = kernel_size[:1] + (2 * self.new_length, ) + 
kernel_size[2:]\n new_kernels = params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()\n\n new_conv = nn.Conv2d(2 * self.new_length, conv_layer.out_channels,\n conv_layer.kernel_size, conv_layer.stride, conv_layer.padding,\n bias=True if len(params) == 2 else False)\n new_conv.weight.data = new_kernels\n if len(params) == 2:\n new_conv.bias.data = params[1].data # add bias if necessary\n layer_name = list(container.state_dict().keys())[0][:-7] # remove .weight suffix to get the layer name\n\n # replace the first convolution layer\n setattr(container, layer_name, new_conv)\n\n if self.base_model_name == 'BNInception':\n import torch.utils.model_zoo as model_zoo\n sd = model_zoo.load_url('https://www.dropbox.com/s/35ftw2t4mxxgjae/BNInceptionFlow-ef652051.pth.tar?dl=1')\n base_model.load_state_dict(sd)\n print('=> Loading pretrained Flow weight done...')\n else:\n print('#' * 30, 'Warning! No Flow pretrained model is found')\n return base_model\n\n def _construct_diff_model(self, base_model, keep_rgb=False):\n # modify the convolution layers\n # Torch models are usually defined in a hierarchical way.\n # nn.modules.children() return all sub modules in a DFS manner\n modules = list(self.base_model.modules())\n first_conv_idx = list(filter(lambda x: isinstance(modules[x], nn.Conv2d), list(range(len(modules)))))[0]\n conv_layer = modules[first_conv_idx]\n container = modules[first_conv_idx - 1]\n\n # modify parameters, assume the first blob contains the convolution kernels\n params = [x.clone() for x in conv_layer.parameters()]\n kernel_size = params[0].size()\n if not keep_rgb:\n new_kernel_size = kernel_size[:1] + (3 * self.new_length,) + kernel_size[2:]\n new_kernels = params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()\n else:\n new_kernel_size = kernel_size[:1] + (3 * self.new_length,) + kernel_size[2:]\n new_kernels = torch.cat((params[0].data, params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()),\n 1)\n new_kernel_size = kernel_size[:1] + (3 + 3 * self.new_length,) + kernel_size[2:]\n\n new_conv = nn.Conv2d(new_kernel_size[1], conv_layer.out_channels,\n conv_layer.kernel_size, conv_layer.stride, conv_layer.padding,\n bias=True if len(params) == 2 else False)\n new_conv.weight.data = new_kernels\n if len(params) == 2:\n new_conv.bias.data = params[1].data # add bias if necessary\n layer_name = list(container.state_dict().keys())[0][:-7] # remove .weight suffix to get the layer name\n\n # replace the first convolution layer\n setattr(container, layer_name, new_conv)\n return base_model\n\n @property\n def crop_size(self):\n return self.input_size\n\n @property\n def scale_size(self):\n return self.input_size * 256 // 224\n\n def get_augmentation(self, flip=True):\n if self.modality == 'RGB':\n if flip:\n #return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75, .66])])\n return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75, .66]),\n GroupRandomHorizontalFlip(is_flow=False)])\n else:\n print('#' * 20, 'NO FLIP!!!')\n return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75, .66])])\n elif self.modality == 'Flow':\n return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75]),\n GroupRandomHorizontalFlip(is_flow=True)])\n elif self.modality == 'RGBDiff':\n return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75]),\n 
GroupRandomHorizontalFlip(is_flow=False)])\n"
] | [
[
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Softmax",
"torch.nn.init.normal_",
"torch.utils.model_zoo.load_url",
"torch.nn.Dropout"
]
] |
csingh27sewts/rlpyt | [
"4252eb63515c9e68c0674fb010d2c6dbfdac9122"
] | [
"rlpyt/envs/dm_control_env.py"
] | [
"from dm_control import suite\nfrom dm_control.suite.wrappers import pixels\nfrom dm_env.specs import Array, BoundedArray\n\nimport numpy as np\nimport os\nimport atari_py\nimport cv2\nimport copy\nfrom collections import namedtuple, OrderedDict\nfrom rlpyt.utils.collections import namedarraytuple\n\nfrom rlpyt.envs.base import Env, EnvStep, EnvSpaces\nfrom rlpyt.spaces.box import Box\nfrom rlpyt.spaces.composite import Composite\nfrom rlpyt.utils.quick_args import save__init__args\nfrom rlpyt.samplers.collections import TrajInfo\n\nState = None\n\ndef convert_dm_control_to_rlpyt_space(dm_control_space):\n \"\"\"Recursively convert dm_control_space into gym space.\n\n Note: Need to check the following cases of the input type, in the following\n order:\n (1) BoundedArray\n (2) Array\n (3) OrderedDict.\n\n - Generally, dm_control observation_specs are OrderedDict with other spaces\n (e.g. Array) nested in it.\n - Generally, dm_control action_specs are of type `BoundedArray`.\n\n To handle dm_control observation_specs as inputs, we check the following\n input types in order to enable recursive calling on each nested item.\n \"\"\"\n if isinstance(dm_control_space, BoundedArray):\n rlpyt_box = Box(\n low=dm_control_space.minimum,\n high=dm_control_space.maximum,\n shape=None,\n dtype=dm_control_space.dtype)\n assert rlpyt_box.shape == dm_control_space.shape, (\n (rlpyt_box.shape, dm_control_space.shape))\n return rlpyt_box\n elif isinstance(dm_control_space, Array):\n if isinstance(dm_control_space, BoundedArray):\n raise ValueError(\"The order of the if-statements matters.\")\n return Box(\n low=-float(\"inf\"),\n high=float(\"inf\"),\n shape=dm_control_space.shape,\n dtype=dm_control_space.dtype)\n elif isinstance(dm_control_space, OrderedDict):\n global State\n if State is None:\n State = namedtuple('State', list(dm_control_space.keys()))\n return Composite([convert_dm_control_to_rlpyt_space(value)\n for value in dm_control_space.values()], State)\n else:\n raise ValueError(dm_control_space)\n\nEnvInfo = None\nObservation = None\n\ndef init_namedtuples(info_keys=None, state_keys=None):\n global EnvInfo, Observation, State\n\n if info_keys is None:\n info_keys = ['traj_done']\n\n if state_keys is None:\n state_keys = ['pixels']\n\n EnvInfo = namedtuple('EnvInfo', info_keys)\n Observation = namedarraytuple('Observation', state_keys)\n State = namedtuple('State', state_keys)\n\nclass DMControlEnv(Env):\n\n def __init__(self,\n domain,\n task,\n frame_skip=1,\n normalize=False,\n pixel_wrapper_kwargs=None,\n task_kwargs={},\n environment_kwargs={},\n max_path_length=1200,\n ):\n save__init__args(locals(), underscore=True)\n\n env = suite.load(domain_name=domain,\n task_name=task,\n task_kwargs=task_kwargs,\n environment_kwargs=environment_kwargs)\n if normalize:\n np.testing.assert_equal(env.action_spec().minimum, -1)\n np.testing.assert_equal(env.action_spec().maximum, 1)\n if pixel_wrapper_kwargs is not None:\n env = pixels.Wrapper(env, **pixel_wrapper_kwargs)\n self._env = env\n\n self._observation_keys = tuple(env.observation_spec().keys())\n observation_space = convert_dm_control_to_rlpyt_space(\n env.observation_spec())\n self._observation_space = observation_space\n\n action_space = convert_dm_control_to_rlpyt_space(env.action_spec())\n if len(action_space.shape) > 1:\n raise NotImplementedError(\n \"Shape of the action space ({}) is not flat, make sure to\"\n \" check the implemenation.\".format(action_space))\n self._action_space = action_space\n\n self._step_count = 0\n\n def 
reset(self):\n self._step_count = 0\n time_step = self._env.reset()\n observation = self._filter_observation(time_step.observation)\n\n global Observation\n if Observation is None:\n Observation = namedarraytuple(\"Observation\", list(observation.keys()))\n observation = Observation(**{k: v for k, v in observation.items()\n if k in self._observation_keys})\n return observation\n\n def step(self, action):\n time_step = self._env.step(action)\n reward = time_step.reward\n terminal = time_step.last()\n info = time_step.info\n info.update({\n key: value\n for key, value in time_step.observation.items()\n if key not in self._observation_keys\n })\n observation = self._filter_observation(time_step.observation)\n\n self._step_count += 1\n info['traj_done'] = self._step_count >= self._max_path_length\n\n global EnvInfo\n if EnvInfo is None:\n EnvInfo = namedtuple(\"EnvInfo\", list(info.keys()))\n info = EnvInfo(**{k: v for k, v in info.items() if k in EnvInfo._fields})\n\n global Observation\n if Observation is None:\n Observation = namedarraytuple(\"Observation\", list(observation.keys()))\n observation = Observation(**{k: v.copy() for k, v in observation.items()\n if k in self._observation_keys})\n\n return EnvStep(observation, reward, terminal, info)\n\n def render(self, *args, mode='rgb_array', width=256, height=256,\n cameria_id=0, **kwargs):\n if mode == 'human':\n raise NotImplementedError(\n \"TODO(Alacarter): Figure out how to not continuously launch\"\n \" viewers if one is already open.\"\n \" See: https://github.com/deepmind/dm_control/issues/39.\")\n elif mode == 'rgb_array':\n return self._env.physics.render(width=width, height=height,\n camera_id=cameria_id, **kwargs)\n raise NotImplementedError(mode)\n\n def get_obs(self):\n obs = self._env.task.get_observation(self._env.physics)\n obs['pixels'] = self._env.physics.render(**self._env._render_kwargs)\n obs = self._filter_observation(obs)\n obs = Observation(**{k: v for k, v in obs.items()\n if k in self._observation_keys})\n return obs\n\n def get_state(self, ignore_step=True):\n if ignore_step:\n return self._env.physics.get_state()\n return self._env.physics.get_state(), self._step_count\n\n def set_state(self, state, ignore_step=True):\n if ignore_step:\n self._env.physics.set_state(state)\n self._env.step(np.zeros(self.action_space.shape))\n else:\n self._env.physics.set_state(state[0])\n self._env.step(np.zeros(self.action_space.shape))\n self._step_count = state[1]\n\n def get_geoms(self):\n return self._env.task.get_geoms(self._env.physics)\n\n @property\n def spaces(self):\n return EnvSpaces(\n observation=self._observation_space,\n action=self._action_space,\n )\n\n ###########################################################################\n # Helpers\n\n def _filter_observation(self, observation):\n observation = type(observation)([\n (name, value)\n for name, value in observation.items()\n if name in self._observation_keys\n ])\n return observation\n\n ###########################################################################\n # Properties\n"
] | [
[
"numpy.zeros"
]
] |
MUST-AI-Lab/NAS-Projects | [
"1ce3249a5a58af3506b8c9af977008ddf8198445"
] | [
"exps/NAS-Bench-201/statistics.py"
] | [
"##################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #\n##################################################\nimport os, sys, time, argparse, collections\nfrom copy import deepcopy\nimport torch\nimport torch.nn as nn\nfrom pathlib import Path\nfrom collections import defaultdict\nlib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()\nif str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))\nfrom log_utils import AverageMeter, time_string, convert_secs2time\nfrom config_utils import load_config, dict2config\nfrom datasets import get_datasets\n# NAS-Bench-201 related module or function\nfrom models import CellStructure, get_cell_based_tiny_net\nfrom nas_201_api import ArchResults, ResultsCount\nfrom functions import pure_evaluate\n\n\n\ndef create_result_count(used_seed, dataset, arch_config, results, dataloader_dict):\n xresult = ResultsCount(dataset, results['net_state_dict'], results['train_acc1es'], results['train_losses'], \\\n results['param'], results['flop'], arch_config, used_seed, results['total_epoch'], None)\n\n net_config = dict2config({'name': 'infer.tiny', 'C': arch_config['channel'], 'N': arch_config['num_cells'], 'genotype': CellStructure.str2structure(arch_config['arch_str']), 'num_classes':arch_config['class_num']}, None)\n network = get_cell_based_tiny_net(net_config)\n network.load_state_dict(xresult.get_net_param())\n if 'train_times' in results: # new version\n xresult.update_train_info(results['train_acc1es'], results['train_acc5es'], results['train_losses'], results['train_times'])\n xresult.update_eval(results['valid_acc1es'], results['valid_losses'], results['valid_times'])\n else:\n if dataset == 'cifar10-valid':\n xresult.update_OLD_eval('x-valid' , results['valid_acc1es'], results['valid_losses'])\n loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format('cifar10', 'test')], network.cuda())\n xresult.update_OLD_eval('ori-test', {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})\n xresult.update_latency(latencies)\n elif dataset == 'cifar10':\n xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])\n loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda())\n xresult.update_latency(latencies)\n elif dataset == 'cifar100' or dataset == 'ImageNet16-120':\n xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])\n loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'valid')], network.cuda())\n xresult.update_OLD_eval('x-valid', {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})\n loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda())\n xresult.update_OLD_eval('x-test' , {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})\n xresult.update_latency(latencies)\n else:\n raise ValueError('invalid dataset name : {:}'.format(dataset))\n return xresult\n \n\n\ndef account_one_arch(arch_index, arch_str, checkpoints, datasets, dataloader_dict):\n information = ArchResults(arch_index, arch_str)\n\n for checkpoint_path in checkpoints:\n checkpoint = torch.load(checkpoint_path, map_location='cpu')\n used_seed = checkpoint_path.name.split('-')[-1].split('.')[0]\n for dataset in datasets:\n assert dataset in checkpoint, 'Can not find {:} in arch-{:} from {:}'.format(dataset, arch_index, checkpoint_path)\n results = checkpoint[dataset]\n 
assert results['finish-train'], 'This {:} arch seed={:} does not finish train on {:} ::: {:}'.format(arch_index, used_seed, dataset, checkpoint_path)\n arch_config = {'channel': results['channel'], 'num_cells': results['num_cells'], 'arch_str': arch_str, 'class_num': results['config']['class_num']}\n \n xresult = create_result_count(used_seed, dataset, arch_config, results, dataloader_dict)\n information.update(dataset, int(used_seed), xresult)\n return information\n\n\n\ndef GET_DataLoaders(workers):\n\n torch.set_num_threads(workers)\n\n root_dir = (Path(__file__).parent / '..' / '..').resolve()\n torch_dir = Path(os.environ['TORCH_HOME'])\n # cifar\n cifar_config_path = root_dir / 'configs' / 'nas-benchmark' / 'CIFAR.config'\n cifar_config = load_config(cifar_config_path, None, None)\n print ('{:} Create data-loader for all datasets'.format(time_string()))\n print ('-'*200)\n TRAIN_CIFAR10, VALID_CIFAR10, xshape, class_num = get_datasets('cifar10', str(torch_dir/'cifar.python'), -1)\n print ('original CIFAR-10 : {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_CIFAR10), len(VALID_CIFAR10), xshape, class_num))\n cifar10_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'cifar-split.txt', None, None)\n assert cifar10_splits.train[:10] == [0, 5, 7, 11, 13, 15, 16, 17, 20, 24] and cifar10_splits.valid[:10] == [1, 2, 3, 4, 6, 8, 9, 10, 12, 14]\n temp_dataset = deepcopy(TRAIN_CIFAR10)\n temp_dataset.transform = VALID_CIFAR10.transform\n # data loader\n trainval_cifar10_loader = torch.utils.data.DataLoader(TRAIN_CIFAR10, batch_size=cifar_config.batch_size, shuffle=True , num_workers=workers, pin_memory=True)\n train_cifar10_loader = torch.utils.data.DataLoader(TRAIN_CIFAR10, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar10_splits.train), num_workers=workers, pin_memory=True)\n valid_cifar10_loader = torch.utils.data.DataLoader(temp_dataset , batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar10_splits.valid), num_workers=workers, pin_memory=True)\n test__cifar10_loader = torch.utils.data.DataLoader(VALID_CIFAR10, batch_size=cifar_config.batch_size, shuffle=False, num_workers=workers, pin_memory=True)\n print ('CIFAR-10 : trval-loader has {:3d} batch with {:} per batch'.format(len(trainval_cifar10_loader), cifar_config.batch_size))\n print ('CIFAR-10 : train-loader has {:3d} batch with {:} per batch'.format(len(train_cifar10_loader), cifar_config.batch_size))\n print ('CIFAR-10 : valid-loader has {:3d} batch with {:} per batch'.format(len(valid_cifar10_loader), cifar_config.batch_size))\n print ('CIFAR-10 : test--loader has {:3d} batch with {:} per batch'.format(len(test__cifar10_loader), cifar_config.batch_size))\n print ('-'*200)\n # CIFAR-100\n TRAIN_CIFAR100, VALID_CIFAR100, xshape, class_num = get_datasets('cifar100', str(torch_dir/'cifar.python'), -1)\n print ('original CIFAR-100: {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_CIFAR100), len(VALID_CIFAR100), xshape, class_num))\n cifar100_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'cifar100-test-split.txt', None, None)\n assert cifar100_splits.xvalid[:10] == [1, 3, 4, 5, 8, 10, 13, 14, 15, 16] and cifar100_splits.xtest[:10] == [0, 2, 6, 7, 9, 11, 12, 17, 20, 24]\n train_cifar100_loader = torch.utils.data.DataLoader(TRAIN_CIFAR100, batch_size=cifar_config.batch_size, shuffle=True, num_workers=workers, 
pin_memory=True)\n valid_cifar100_loader = torch.utils.data.DataLoader(VALID_CIFAR100, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xvalid), num_workers=workers, pin_memory=True)\n test__cifar100_loader = torch.utils.data.DataLoader(VALID_CIFAR100, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xtest) , num_workers=workers, pin_memory=True)\n print ('CIFAR-100 : train-loader has {:3d} batch'.format(len(train_cifar100_loader)))\n print ('CIFAR-100 : valid-loader has {:3d} batch'.format(len(valid_cifar100_loader)))\n print ('CIFAR-100 : test--loader has {:3d} batch'.format(len(test__cifar100_loader)))\n print ('-'*200)\n\n imagenet16_config_path = 'configs/nas-benchmark/ImageNet-16.config'\n imagenet16_config = load_config(imagenet16_config_path, None, None)\n TRAIN_ImageNet16_120, VALID_ImageNet16_120, xshape, class_num = get_datasets('ImageNet16-120', str(torch_dir/'cifar.python'/'ImageNet16'), -1)\n print ('original TRAIN_ImageNet16_120: {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_ImageNet16_120), len(VALID_ImageNet16_120), xshape, class_num))\n imagenet_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'imagenet-16-120-test-split.txt', None, None)\n assert imagenet_splits.xvalid[:10] == [1, 2, 3, 6, 7, 8, 9, 12, 16, 18] and imagenet_splits.xtest[:10] == [0, 4, 5, 10, 11, 13, 14, 15, 17, 20]\n train_imagenet_loader = torch.utils.data.DataLoader(TRAIN_ImageNet16_120, batch_size=imagenet16_config.batch_size, shuffle=True, num_workers=workers, pin_memory=True)\n valid_imagenet_loader = torch.utils.data.DataLoader(VALID_ImageNet16_120, batch_size=imagenet16_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet_splits.xvalid), num_workers=workers, pin_memory=True)\n test__imagenet_loader = torch.utils.data.DataLoader(VALID_ImageNet16_120, batch_size=imagenet16_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet_splits.xtest) , num_workers=workers, pin_memory=True)\n print ('ImageNet-16-120 : train-loader has {:3d} batch with {:} per batch'.format(len(train_imagenet_loader), imagenet16_config.batch_size))\n print ('ImageNet-16-120 : valid-loader has {:3d} batch with {:} per batch'.format(len(valid_imagenet_loader), imagenet16_config.batch_size))\n print ('ImageNet-16-120 : test--loader has {:3d} batch with {:} per batch'.format(len(test__imagenet_loader), imagenet16_config.batch_size))\n\n # 'cifar10', 'cifar100', 'ImageNet16-120'\n loaders = {'cifar10@trainval': trainval_cifar10_loader,\n 'cifar10@train' : train_cifar10_loader,\n 'cifar10@valid' : valid_cifar10_loader,\n 'cifar10@test' : test__cifar10_loader,\n 'cifar100@train' : train_cifar100_loader,\n 'cifar100@valid' : valid_cifar100_loader,\n 'cifar100@test' : test__cifar100_loader,\n 'ImageNet16-120@train': train_imagenet_loader,\n 'ImageNet16-120@valid': valid_imagenet_loader,\n 'ImageNet16-120@test' : test__imagenet_loader}\n return loaders\n\n\n\ndef simplify(save_dir, meta_file, basestr, target_dir):\n meta_infos = torch.load(meta_file, map_location='cpu')\n meta_archs = meta_infos['archs'] # a list of architecture strings\n meta_num_archs = meta_infos['total']\n meta_max_node = meta_infos['max_node']\n assert meta_num_archs == len(meta_archs), 'invalid number of archs : {:} vs {:}'.format(meta_num_archs, len(meta_archs))\n\n sub_model_dirs = 
sorted(list(save_dir.glob('*-*-{:}'.format(basestr))))\n print ('{:} find {:} directories used to save checkpoints'.format(time_string(), len(sub_model_dirs)))\n \n subdir2archs, num_evaluated_arch = collections.OrderedDict(), 0\n num_seeds = defaultdict(lambda: 0)\n for index, sub_dir in enumerate(sub_model_dirs):\n xcheckpoints = list(sub_dir.glob('arch-*-seed-*.pth'))\n arch_indexes = set()\n for checkpoint in xcheckpoints:\n temp_names = checkpoint.name.split('-')\n assert len(temp_names) == 4 and temp_names[0] == 'arch' and temp_names[2] == 'seed', 'invalid checkpoint name : {:}'.format(checkpoint.name)\n arch_indexes.add( temp_names[1] )\n subdir2archs[sub_dir] = sorted(list(arch_indexes))\n num_evaluated_arch += len(arch_indexes)\n # count number of seeds for each architecture\n for arch_index in arch_indexes:\n num_seeds[ len(list(sub_dir.glob('arch-{:}-seed-*.pth'.format(arch_index)))) ] += 1\n print('{:} There are {:5d} architectures that have been evaluated ({:} in total).'.format(time_string(), num_evaluated_arch, meta_num_archs))\n for key in sorted( list( num_seeds.keys() ) ): print ('{:} There are {:5d} architectures that are evaluated {:} times.'.format(time_string(), num_seeds[key], key))\n\n dataloader_dict = GET_DataLoaders( 6 )\n\n to_save_simply = save_dir / 'simplifies'\n to_save_allarc = save_dir / 'simplifies' / 'architectures'\n if not to_save_simply.exists(): to_save_simply.mkdir(parents=True, exist_ok=True)\n if not to_save_allarc.exists(): to_save_allarc.mkdir(parents=True, exist_ok=True)\n\n assert (save_dir / target_dir) in subdir2archs, 'can not find {:}'.format(target_dir)\n arch2infos, datasets = {}, ('cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120')\n evaluated_indexes = set()\n target_directory = save_dir / target_dir\n target_less_dir = save_dir / '{:}-LESS'.format(target_dir)\n arch_indexes = subdir2archs[ target_directory ]\n num_seeds = defaultdict(lambda: 0)\n end_time = time.time()\n arch_time = AverageMeter()\n for idx, arch_index in enumerate(arch_indexes):\n checkpoints = list(target_directory.glob('arch-{:}-seed-*.pth'.format(arch_index)))\n ckps_less = list(target_less_dir.glob('arch-{:}-seed-*.pth'.format(arch_index)))\n # create the arch info for each architecture\n try:\n arch_info_full = account_one_arch(arch_index, meta_archs[int(arch_index)], checkpoints, datasets, dataloader_dict)\n arch_info_less = account_one_arch(arch_index, meta_archs[int(arch_index)], ckps_less, ['cifar10-valid'], dataloader_dict)\n num_seeds[ len(checkpoints) ] += 1\n except:\n print('Loading {:} failed, : {:}'.format(arch_index, checkpoints))\n continue\n assert int(arch_index) not in evaluated_indexes, 'conflict arch-index : {:}'.format(arch_index)\n assert 0 <= int(arch_index) < len(meta_archs), 'invalid arch-index {:} (not found in meta_archs)'.format(arch_index)\n arch_info = {'full': arch_info_full, 'less': arch_info_less}\n evaluated_indexes.add( int(arch_index) )\n arch2infos[int(arch_index)] = arch_info\n torch.save({'full': arch_info_full.state_dict(),\n 'less': arch_info_less.state_dict()}, to_save_allarc / '{:}-FULL.pth'.format(arch_index))\n arch_info['full'].clear_params()\n arch_info['less'].clear_params()\n torch.save({'full': arch_info_full.state_dict(),\n 'less': arch_info_less.state_dict()}, to_save_allarc / '{:}-SIMPLE.pth'.format(arch_index))\n # measure elapsed time\n arch_time.update(time.time() - end_time)\n end_time = time.time()\n need_time = '{:}'.format( convert_secs2time(arch_time.avg * (len(arch_indexes)-idx-1), True) )\n 
print('{:} {:} [{:03d}/{:03d}] : {:} still need {:}'.format(time_string(), target_dir, idx, len(arch_indexes), arch_index, need_time))\n # measure time\n xstrs = ['{:}:{:03d}'.format(key, num_seeds[key]) for key in sorted( list( num_seeds.keys() ) ) ]\n print('{:} {:} done : {:}'.format(time_string(), target_dir, xstrs))\n final_infos = {'meta_archs' : meta_archs,\n 'total_archs': meta_num_archs,\n 'basestr' : basestr,\n 'arch2infos' : arch2infos,\n 'evaluated_indexes': evaluated_indexes}\n save_file_name = to_save_simply / '{:}.pth'.format(target_dir)\n torch.save(final_infos, save_file_name)\n print ('Save {:} / {:} architecture results into {:}.'.format(len(evaluated_indexes), meta_num_archs, save_file_name))\n\n\n\ndef merge_all(save_dir, meta_file, basestr):\n meta_infos = torch.load(meta_file, map_location='cpu')\n meta_archs = meta_infos['archs']\n meta_num_archs = meta_infos['total']\n meta_max_node = meta_infos['max_node']\n assert meta_num_archs == len(meta_archs), 'invalid number of archs : {:} vs {:}'.format(meta_num_archs, len(meta_archs))\n\n sub_model_dirs = sorted(list(save_dir.glob('*-*-{:}'.format(basestr))))\n print ('{:} find {:} directories used to save checkpoints'.format(time_string(), len(sub_model_dirs)))\n for index, sub_dir in enumerate(sub_model_dirs):\n arch_info_files = sorted( list(sub_dir.glob('arch-*-seed-*.pth') ) )\n print ('The {:02d}/{:02d}-th directory : {:} : {:} runs.'.format(index, len(sub_model_dirs), sub_dir, len(arch_info_files)))\n \n arch2infos, evaluated_indexes = dict(), set()\n for IDX, sub_dir in enumerate(sub_model_dirs):\n ckp_path = sub_dir.parent / 'simplifies' / '{:}.pth'.format(sub_dir.name)\n if ckp_path.exists():\n sub_ckps = torch.load(ckp_path, map_location='cpu')\n assert sub_ckps['total_archs'] == meta_num_archs and sub_ckps['basestr'] == basestr\n xarch2infos = sub_ckps['arch2infos']\n xevalindexs = sub_ckps['evaluated_indexes']\n for eval_index in xevalindexs:\n assert eval_index not in evaluated_indexes and eval_index not in arch2infos\n #arch2infos[eval_index] = xarch2infos[eval_index].state_dict()\n arch2infos[eval_index] = {'full': xarch2infos[eval_index]['full'].state_dict(),\n 'less': xarch2infos[eval_index]['less'].state_dict()}\n evaluated_indexes.add( eval_index )\n print ('{:} [{:03d}/{:03d}] merge data from {:} with {:} models.'.format(time_string(), IDX, len(sub_model_dirs), ckp_path, len(xevalindexs)))\n else:\n raise ValueError('Can not find {:}'.format(ckp_path))\n #print ('{:} [{:03d}/{:03d}] can not find {:}, skip.'.format(time_string(), IDX, len(subdir2archs), ckp_path))\n\n evaluated_indexes = sorted( list( evaluated_indexes ) )\n print ('Finally, there are {:} architectures that have been trained and evaluated.'.format(len(evaluated_indexes)))\n\n to_save_simply = save_dir / 'simplifies'\n if not to_save_simply.exists(): to_save_simply.mkdir(parents=True, exist_ok=True)\n final_infos = {'meta_archs' : meta_archs,\n 'total_archs': meta_num_archs,\n 'arch2infos' : arch2infos,\n 'evaluated_indexes': evaluated_indexes}\n save_file_name = to_save_simply / '{:}-final-infos.pth'.format(basestr)\n torch.save(final_infos, save_file_name)\n print ('Save {:} / {:} architecture results into {:}.'.format(len(evaluated_indexes), meta_num_archs, save_file_name))\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='NAS-BENCH-201', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--mode' , type=str, choices=['cal', 'merge'], help='The running mode for this 
script.')\n parser.add_argument('--base_save_dir', type=str, default='./output/NAS-BENCH-201-4', help='The base-name of folder to save checkpoints and log.')\n parser.add_argument('--target_dir' , type=str, help='The target directory.')\n parser.add_argument('--max_node' , type=int, default=4, help='The maximum node in a cell.')\n parser.add_argument('--channel' , type=int, default=16, help='The number of channels.')\n parser.add_argument('--num_cells' , type=int, default=5, help='The number of cells in one stage.')\n args = parser.parse_args()\n \n save_dir = Path( args.base_save_dir )\n meta_path = save_dir / 'meta-node-{:}.pth'.format(args.max_node)\n assert save_dir.exists(), 'invalid save dir path : {:}'.format(save_dir)\n assert meta_path.exists(), 'invalid saved meta path : {:}'.format(meta_path)\n print ('start the statistics of our nas-benchmark from {:} using {:}.'.format(save_dir, args.target_dir))\n basestr = 'C{:}-N{:}'.format(args.channel, args.num_cells)\n \n if args.mode == 'cal':\n simplify(save_dir, meta_path, basestr, args.target_dir)\n elif args.mode == 'merge':\n merge_all(save_dir, meta_path, basestr)\n else:\n raise ValueError('invalid mode : {:}'.format(args.mode))\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.load",
"torch.save",
"torch.set_num_threads",
"torch.utils.data.sampler.SubsetRandomSampler"
]
] |
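The NAS-Bench-201 statistics script in the row above keys all of its bookkeeping on checkpoint filenames of the form arch-<index>-seed-<seed>.pth (see the asserts inside simplify()). A minimal, self-contained sketch of that per-architecture seed accounting, assuming only that filename layout — the function name and return shape here are illustrative, not from the repo:

from collections import defaultdict
from pathlib import Path

def count_seeds(ckpt_dir: Path):
    # Collect the set of seeds found for each architecture index.
    seeds_per_arch = defaultdict(set)
    for ckpt in ckpt_dir.glob('arch-*-seed-*.pth'):
        parts = ckpt.name[:-len('.pth')].split('-')
        # Expected layout: ['arch', <index>, 'seed', <seed>], as the script asserts.
        assert len(parts) == 4 and parts[0] == 'arch' and parts[2] == 'seed', ckpt.name
        seeds_per_arch[parts[1]].add(parts[3])
    # Histogram: how many architectures were evaluated with k distinct seeds,
    # mirroring the num_seeds defaultdict in simplify().
    histogram = defaultdict(int)
    for seeds in seeds_per_arch.values():
        histogram[len(seeds)] += 1
    return dict(seeds_per_arch), dict(histogram)

Isolating the filename contract like this makes it unit-testable without touching torch, the dataloaders, or the checkpoint payloads.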
rtachi-lab/Human-Cochlear-Model | [
"6584de225176d8d1b2be96939acb7ef7d3f64774"
] | [
"CochlearModel_2D_Direct.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport tqdm\nimport wavfile\n\nclass CochlearModel:\n \"\"\"\n Two-dimensional cochlear model with two-degree-of-freedom\n (2DOF) micro-structure [1] for human. This program employs \n time domain solution proposed in Ref. [2], and for fast calcuration,\n applies non-unifomrom grid setting [3].\n\n Ref.\n [1] Neely S and Kim D, \"A model for active elements in cochlear biomechanics,\"\n The Journal of the Acoustical Society of America, 79(5), 1472--1480, 1986.\n [2] Diependaal, R.J et al, \"Numerical methods for solving one-dimensional\n cochlear models in the time domain, \" The Journal of the Acoustical Society of \n America, 82 (5), 1655--1666, 1987\n [3] Murakami, Y \"Efficiency limit of nonuniform grid setting in two-dimensional\n cochlear model\" Acoustical Science and Technology, 40 (5), 336--343, 2019. \n \n Attributes\n ----------\n Nx : int\n Number of segments for x-axis\n Ny : int\n Number of segments for y-axis\n Lb : float\n Cochlear length [cm]\n W : float\n Witdh of basilar membrane (BM) [cm]\n H : float\n Height of BM [cm]\n b : float\n ratio of BM to CP displacement\n rho : float\n Fluid density [dyn cm^-3]\n dx : float\n Spacing between two segments for x-axis [cm]\n dy : float\n Spacing between two segments for y-axis [cm]\n x : ndarray\n Longitudial poisition from the stapes [cm]\n y : ndarray\n Poisition from the BM [cm]\n k1 : ndarray\n Compliance of BM [dyn cm^-3]\n m1 : ndarray\n Mass of BM [g cm^-2]\n c1 : ndarray \n Resistance of BM [dyn s cm^-3]\n k2 : ndarray\n Compliance of tectrial membrane (TM) [dyn cm^-3]\n m2 : ndarray\n Mass of TM [g cm^-2]\n c2 : ndarray\n Resistance of TM [dyn s cm^-3]\n k3 : ndarray\n Compliance of connection between BM and TM [dyn cm^-3]\n c3 : ndarray\n Resistance of connection between BM and TM [dyn s cm^-3]\n k4 : ndarray\n Compliance of outer hair cell's (OHC's) activity [dyn cm^-3]\n c4 : ndarray\n Resistance of outer hair cell's (OHC's) activity [dyn s cm^-3]\n gamma : ndarray\n Gain factor distribution \n dt : float\n Time step for time domain simulation [sec]\n beta : float\n Complete saturating point in OHC's active process [cm]\n \"\"\"\n def __init__(self, Nx, Ny, gamma):\n \"\"\"\n Parameters\n ----------\n Nx : int\n Number of segment for x-axis\n Ny : int\n Number of segment for y-axis\n gamma : ndarray\n Gain factor distribution\n \"\"\"\n self.Nx = Nx\n self.Ny = Ny\n self.Lb = 3.5\n self.L = 0.1\n self.W = 0.1\n self.H = 0.1\n self.b = 0.4\n self.rho = 1.0\n self.dx = self.Lb/self.Nx\n self.x = np.arange(0,self.Lb,self.dx)\n \n By = 100\n ry = 100\n m = np.linspace(0,1,Ny)\n Bdy = np.exp(By)\n Ay = (ry-1)*By/((ry-1)*(Bdy-1)+By*(Bdy-1))\n Cy = 1-Ay/By*Bdy+Ay/By\n Dy = -Ay/By\n self.y = (Ay/By*np.exp(By*m) + Cy*m + Dy)*self.H\n self.dy = self.y[1:]-self.y[0:-1]\n\n ch_damp = 2.8 * np.exp(-0.2 * self.x)\n \n self.k1 = 2.2e8*np.exp(-3*self.x)\n self.m1 = 3e-3\n self.c1 = 6 + 670*np.exp(-1.5*self.x) * ch_damp\n self.k2 = 1.4e6*np.exp(-3.3*self.x)\n self.c2 = 4.4*np.exp(-1.65*self.x) * ch_damp\n self.m2 = 0.5e-3\n self.k3 = 2.0e6*np.exp(-3*self.x)\n self.c3 = 0.8*np.exp(-0.6*self.x) * ch_damp\n self.k4 = 1.15e8*np.exp(-3*self.x)\n self.c4 = 440.0*np.exp(-1.5*self.x) * ch_damp\n\n self.c1c3 = self.c1 + self.c3\n self.k1k3 = self.k1 + self.k3\n self.c2c3 = self.c2 + self.c3\n self.k2k3 = self.k2 + self.k3\n\n self.gamma = gamma\n\n self.dt = 10e-6\n\n self.beta = 50e-7\n\n def Gohc(self, uc, beta):\n return beta*np.tanh(uc/beta)\n\n def dGohc(self, uc, vc, beta):\n return 
vc/np.cosh(uc)**2\n\n def get_g(self, vb, ub, vt, ut):\n\n gb = self.c1c3*vb + self.k1k3*ub - self.c3*vt - self.k3*ut\n gt = - self.c3*vb - self.k3*ub + self.c2c3*vt + self.k2k3*ut\n\n uc_lin = ub - ut\n vc_lin = vb - vt\n\n uc = self.Gohc(uc_lin, self.beta)\n vc = self.dGohc(uc_lin, vc_lin, self.beta)\n\n gb -= self.gamma * ( self.c4*vc + self.k4*uc )\n\n return gb, gt\n\n def solve_time_domain(self, f):\n \"\"\"\n Solve the cochlear model in time domain\n\n Parameters\n ----------\n f : ndarray\n Input signal [cm s^-2]\n\n Returns:\n --------\n vb : ndarray\n Basilar membrane (BM) velocity [cm s^-1]\n ub : ndarray\n Basilar membrane (BM) displacement [cm]\n p : ndarray\n Pressure difference between two chambers [barye]\n (1 [barye]= 0.1 [Pa])\n \"\"\"\n Ntime = int(round(f.size/2))\n T = Ntime * self.dt\n\n t2 = np.arange(0,T,self.dt/2)\n t = np.arange(0,T,self.dt)\n\n alpha2 = 4*self.rho*self.b/self.dy/self.m1\n\n vb = np.zeros((Ntime,Nx))\n ub = np.zeros((Ntime,Nx))\n vt = np.zeros((Ntime,Nx))\n ut = np.zeros((Ntime,Nx))\n\n p = np.zeros((Ntime,Nx))\n\n Ay = np.zeros((Ny,Ny))\n\n Ay[0,0] = -2/self.dy[0]**2 - alpha2[0]\n Ay[0,1] = 2/self.dy[0]**2\n\n aym = np.zeros(Ny)\n bym = np.zeros(Ny)\n aym[1:-1] = self.dy[1:]/self.dy[:-1]\n bym[1:-1] = 2/self.dy[1:]/self.dy[:-1]/(1+aym[1:-1])\n\n for m in range(1,Ny-1):\n Ay[m,m-1] = bym[m]*aym[m]\n Ay[m,m] = -bym[m]*(1+aym[m])\n Ay[m,m+1] = bym[m]\n\n Ay[Ny-1,Ny-2] = 2/self.dy[-1]**2\n Ay[Ny-1,Ny-1] = -2/self.dy[-1]**2\n\n Iy = np.eye(Ny)\n F = np.zeros((Nx*Ny,Nx*Ny))\n F[:Ny,:Ny] = -2*Iy + Ay*self.dx**2\n F[:Ny,Ny:Ny*2] = 2*Iy\n for mm in range(1,Nx-1):\n F[mm*Ny:(mm+1)*Ny,(mm-1)*Ny:mm*Ny] = Iy \n F[mm*Ny:(mm+1)*Ny,mm*Ny:(mm+1)*Ny] = -2*Iy + Ay*self.dx**2 \n F[mm*Ny:(mm+1)*Ny,(mm+1)*Ny:(mm+2)*Ny] = Iy \n F[(Nx-1)*Ny:,(Nx-2)*Ny:(Nx-1)*Ny] = Iy\n F[(Nx-1)*Ny:,(Nx-1)*Ny:] = -2*Iy + Ay*self.dx**2\n F /= self.dx**2\n\n iF = np.linalg.inv(F)\n\n for ii in tqdm.tqdm(range(Ntime-1)):\n ######### RK4 ##################\n\n # (ii)\n gb, gt = self.get_g(vb[ii], ub[ii], vt[ii], ut[ii])\n\n k = np.zeros(Nx*Ny)\n k[::Ny] -= alpha2[0]*gb\n k[:Ny] -= f[ii*2] * 2/self.dx\n \n #(iii)\n p[ii] = np.dot(iF, k)[::Ny]\n\n #(iv)-(v)\n dvb1 = (p[ii]-gb)/self.m1 \n ub1 = ub[ii] + 0.5*self.dt*vb[ii]\n vb1 = vb[ii] + 0.5*self.dt*dvb1\n\n dvt1 = -gt/self.m2\n ut1 = ut[ii] + 0.5*self.dt*vt[ii]\n vt1 = vt[ii] + 0.5*self.dt*dvt1 \n \n # (ii)\n gb, gt = self.get_g(vb1, ub1, vt1, ut1) \n\n k = np.zeros(Nx*Ny)\n k[::Ny] -= alpha2[0]*gb\n k[:Ny] -= f[ii*2+1] * 2/self.dx\n\n #(iii)\n p1 = np.dot(iF, k)[::Ny]\n\n #(iv)-(v)\n dvb2 = (p1-gb)/self.m1\n ub2 = ub[ii] + 0.5*self.dt*vb1\n vb2 = vb[ii] + 0.5*self.dt*dvb2\n\n dvt2 = -gt/self.m2\n ut2 = ut[ii] + 0.5*self.dt*vt1\n vt2 = vt[ii] + 0.5*self.dt*dvt2 \n\n # (ii)\n gb, gt = self.get_g(vb2, ub2, vt2, ut2)\n\n k = np.zeros(Nx*Ny)\n k[::Ny] -= alpha2[0]*gb\n k[:Ny] -= f[ii*2+1] * 2/self.dx\n\n #(iii)\n p2 = np.dot(iF, k)[::Ny]\n\n #(iv)-(v)\n dvb3 = (p2-gb)/self.m1\n ub3 = ub[ii] + self.dt*vb2 \n vb3 = vb[ii] + self.dt*dvb3\n\n dvt3 = -gt/self.m2\n ut3 = ut[ii] + self.dt*vt2\n vt3 = vt[ii] + self.dt*dvt3 \n\n # (ii)\n gb, gt = self.get_g(vb3, ub3, vt3, ut3)\n \n k = np.zeros(Nx*Ny)\n k[::Ny] -= alpha2[0]*gb\n k[:Ny] -= f[ii*2+2] * 2/self.dx\n\n #(iii)\n p3 = np.dot(iF, k)[::Ny]\n\n #(iv)-(v)\n dvb4 = (p3-gb)/self.m1\n\n dvt4 = -gt/self.m2 \n\n ub[ii+1] = ub[ii] + self.dt/6*(vb[ii] + 2*vb1 + 2*vb2 + vb3)\n vb[ii+1] = vb[ii] + self.dt/6*(dvb1 + 2*dvb2 + 2*dvb3 + dvb4) \n ut[ii+1] = ut[ii] + self.dt/6*(vt[ii] + 2*vt1 + 2*vt2 + vt3)\n 
vt[ii+1] = vt[ii] + self.dt/6*(dvt1 + 2*dvt2 + 2*dvt3 + dvt4)\n\n return vb, ub, p\n\n\"\"\"\nA demonstration plots envelopes of basilar membrane (BM) velocity\nfor 0.25, 1 and 4 kHz tones varied 0 to 100 dB with 20 dB step.\n\"\"\" \nif __name__ == \"__main__\":\n Nx = 300\n Ny = 4\n g = 0.8\n\n gamma = np.ones(Nx)*g\n\n cm = CochlearModel(Nx, Ny, gamma) # Initial setup\n\n Lps = np.arange(0,120,20)\n\n for fp in [250, 1000, 4000]:\n filename = '%gHz.wav'%(fp)\n plt.figure()\n for Lp in Lps:\n print(\"%dHz %ddB\"%(fp, Lp))\n sinewave = wavfile.load(filename, Lp) # Loading input signal\n\n vb, ub, p = cm.solve_time_domain( sinewave ) # Solve\n\n plt.plot(cm.x*10, 20*np.log10(np.max(np.abs(vb[int(round(vb.shape[0]*9/10)):]), axis=0)*10))\n plt.xlabel('Distance from the stapes [mm]')\n plt.ylabel('BM velocity [dB re 1 mm/s]')\n plt.title('%d Hz'%(fp))\n plt.show()"
] | [
[
"numpy.eye",
"numpy.ones",
"numpy.cosh",
"numpy.zeros",
"numpy.linalg.inv",
"matplotlib.pyplot.figure",
"numpy.exp",
"numpy.arange",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.dot",
"numpy.linspace",
"matplotlib.pyplot.xlabel",
"numpy.tanh"
]
] |
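solve_time_domain() in the cochlear-model row above advances the coupled BM/TM system with a hand-unrolled fourth-order Runge-Kutta step, which is why the input f is sampled at dt/2: the two midpoint stages consume f[2*ii+1] and the final stage f[2*ii+2]. A compact sketch of the same scheme on a single damped oscillator m*u'' + c*u' + k*u = f(t) — parameters and names are illustrative, not the model's:

import numpy as np

def rk4_oscillator(m, c, k, f, dt, n_steps):
    # One DOF as a first-order system: u' = v, m*v' = f(t) - c*v - k*u.
    u, v = 0.0, 0.0
    out = np.zeros(n_steps)
    def acc(u_, v_, t_):
        return (f(t_) - c * v_ - k * u_) / m
    for i in range(n_steps):
        t = i * dt
        # Classic RK4 stages at t, t + dt/2 (twice), and t + dt.
        k1u, k1v = v, acc(u, v, t)
        k2u, k2v = v + 0.5 * dt * k1v, acc(u + 0.5 * dt * k1u, v + 0.5 * dt * k1v, t + 0.5 * dt)
        k3u, k3v = v + 0.5 * dt * k2v, acc(u + 0.5 * dt * k2u, v + 0.5 * dt * k2v, t + 0.5 * dt)
        k4u, k4v = v + dt * k3v, acc(u + dt * k3u, v + dt * k3v, t + dt)
        u += dt / 6 * (k1u + 2 * k2u + 2 * k3u + k4u)
        v += dt / 6 * (k1v + 2 * k2v + 2 * k3v + k4v)
        out[i] = u
    return out

# e.g. a lightly damped oscillator driven at 2 rad/s:
u = rk4_oscillator(1.0, 0.1, 4.0, lambda t: np.sin(2.0 * t), 1e-3, 5000)

The model's loop is this same update applied per segment, with the pressure solve np.dot(iF, k) supplying the forcing term at each stage.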
ivanwilliammd/32images_hdf5converter | [
"2956c163b790d1fc1c3248e46d17894dde52eeb9"
] | [
"Upsampled_4x/HDF5_converter_0.5.py"
] | [
"import glob\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport h5py\r\nimport IPython \r\nimport pandas as pd\r\nimport csv \r\n\r\ndf = pd.read_csv('lung_annotation_raw_Final.csv')\r\ndf = df[['ACC','TIPE','Xmin','Ymin','Xmax','Ymax','Zt_minsplitnum','Zt_minsplit_rev','Zt_maxsplitnum','Zt_maxsplit_rev','box_size']]\r\ndf\r\n\r\ncsv_file=open('lung_nodule_annotation_0.5.csv', mode='w+')\r\n\r\npath = '/home/ivanwilliam/Documents/Full_images/0.5/'\r\nall_dirs = os.listdir(path)\r\ndir_it=0\r\n\r\nheight = 2048\r\nwidth = 2048\r\nratio = height/512\r\n\r\nfor dir_it in range(len(all_dirs)):\r\n\tfile_path = '/home/ivanwilliam/Documents/Full_images/0.5/'+str(all_dirs[dir_it])\r\n\t# import IPython; IPython.embed()\r\n\r\n\tfor root, dirs, files in os.walk(file_path):\r\n\t\tprint('\\n\\tFound directory: %s' % root)\r\n\r\n\t\t# for subdir in dirs:\r\n\t\t# \tprint('SUBFOLDER OF ' + str(root) + ': ' + str(subdir))\r\n\t\t# \tnamedir = str(subdir)\r\n\t\t\r\n\t\tfileName = sorted(files, key=str)\r\n\t\tN_file = len(fileName)\r\n\r\n\t\ti = 1\r\n\t\tk = 0\r\n\t\t\r\n\t\tif i in range(N_file):\t\r\n\t\t\thdf32_list=[]\r\n\t\t\tfor fileName in sorted (files, key=str):\r\n\t\t\t\tif N_file-k*64>=64:\r\n\t\t\t\t\tprint('Opening %d out of %d image at directory: %s/%s' % (i, N_file, root, fileName))\r\n\t\t\t\t\tpicture_ds = cv2.imread('%s/%s' % (root, fileName))\r\n\t\t\t\t\tresize_picture_ds = cv2.resize(picture_ds, dsize=(height, width), interpolation=cv2.INTER_CUBIC)\r\n\t\t\t\t\tpics_array= np.array(resize_picture_ds) \r\n\r\n\t\t\t\t\tif i%64==1:\r\n\t\t\t\t\t\tpics32_list=[]\r\n\t\t\t\t\t\tpics32_list.append(pics_array)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\tif i%64==0:\r\n\t\t\t\t\t\tpics32_list.append(pics_array)\r\n\t\t\t\t\t\tpics32_array=np.stack((pics32_list[0], pics32_list[2], pics32_list[4], pics32_list[6], pics32_list[8], \r\n\t\t\t\t\t\t\tpics32_list[10], pics32_list[12], pics32_list[14], pics32_list[16], pics32_list[18],\r\n\t\t\t\t\t\t\tpics32_list[20], pics32_list[22], pics32_list[24], pics32_list[26], pics32_list[28], \r\n\t\t\t\t\t\t\tpics32_list[30], pics32_list[32], pics32_list[34], pics32_list[36], pics32_list[38],\r\n\t\t\t\t\t\t\tpics32_list[40], pics32_list[42], pics32_list[44], pics32_list[46], pics32_list[48], \r\n\t\t\t\t\t\t\tpics32_list[50], pics32_list[52], pics32_list[54], pics32_list[56], pics32_list[58],\r\n\t\t\t\t\t\t\tpics32_list[60], pics32_list[62]), axis=0)\r\n\t\t\t\t\t\tprint('\\n Compiling 32 images into HDF list \\n')\r\n\t\t\t\t\t\thdf32_list.append(pics32_array)\r\n\t\t\t\t\t\tk = k+1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tpics32_list.append(pics_array)\r\n\t\t\t\t\ti=i+1\r\n\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\tif i==k*64+1:\r\n\t\t\t\t\t\tprint('Opening %d out of %d image at directory: %s/%s' % (i, N_file, root, fileName))\r\n\t\t\t\t\t\tpicture_ds = cv2.imread('%s/%s' % (root, fileName))\r\n\t\t\t\t\t\tresize_picture_ds = cv2.resize(picture_ds, dsize=(height, width), interpolation=cv2.INTER_CUBIC)\r\n\t\t\t\t\t\tpics_array= np.array(resize_picture_ds)\r\n\t\t\t\t\t\tr = N_file - k*64\r\n\r\n\t\t\t\t\t\tprint('\\n\\tThere are less than 64 file remaining, using last 64 images as LAST BATCH of HDF5 from %d till %d' % (i, N_file))\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tx = 64-r\r\n\t\t\t\t\t\tpics32_list = pics32_list[62-x:62]\r\n\t\t\t\t\t\tprint('\\t...............Start with '+str(len(pics32_list)) +' data(s) from previous batch...............')\r\n\t\t\t\t\t\tpics32_list.append(pics_array)\r\n\t\t\t\t\t\ti=i+1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tprint('Opening %d 
out of %d image at directory: %s/%s' % (i, N_file, root, fileName))\r\n\t\t\t\t\t\tpicture_ds = cv2.imread('%s/%s' % (root, fileName))\r\n\t\t\t\t\t\tresize_picture_ds = cv2.resize(picture_ds, dsize=(height, width), interpolation=cv2.INTER_CUBIC)\r\n\t\t\t\t\t\tpics_array= np.array(resize_picture_ds)\r\n\r\n\t\t\t\t\t\tif i==N_file:\r\n\t\t\t\t\t\t\tpics32_list.append(pics_array)\r\n\t\t\t\t\t\t\tpics32_array=np.stack((pics32_list[0], pics32_list[2], pics32_list[4], pics32_list[6], pics32_list[8], \r\n\t\t\t\t\t\t\t\tpics32_list[10], pics32_list[12], pics32_list[14], pics32_list[16], pics32_list[18],\r\n\t\t\t\t\t\t\t\tpics32_list[20], pics32_list[22], pics32_list[24], pics32_list[26], pics32_list[28], \r\n\t\t\t\t\t\t\t\tpics32_list[30], pics32_list[32], pics32_list[34], pics32_list[36], pics32_list[38],\r\n\t\t\t\t\t\t\t\tpics32_list[40], pics32_list[42], pics32_list[44], pics32_list[46], pics32_list[48], \r\n\t\t\t\t\t\t\t\tpics32_list[50], pics32_list[52], pics32_list[54], pics32_list[56], pics32_list[58],\r\n\t\t\t\t\t\t\t\tpics32_list[60], pics32_list[62]), axis=0)\r\n\t\t\t\t\t\t\tprint('\\n Compiling LAST 32 images into 1 HDF list\\n')\r\n\t\t\t\t\t\t\thdf32_list.append(pics32_array)\r\n\t\t\t\t\t\t\tk=k+1\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tpics32_list.append(pics_array)\r\n\t\t\t\t\t\ti=i+1\r\n\t\t\t\r\n\r\n\r\n\r\n######################################################################################\t\t\t\r\n\t\t\tif fileName[0:4]=='AGFA':\r\n\t\t\t\tsearch_str=fileName[0:16]\r\n\t\t\t\tdfAcc=df[df['ACC'].str.match(search_str)]\r\n\t\t\t\ttotal_dfAcc=dfAcc.shape[0]\r\n\t\t\t\tprint('Using AGFA as search keyword')\r\n\t\t\tif fileName[0:4]=='KDC6':\r\n\t\t\t\tsearch_str=fileName[0:10]\r\n\t\t\t\tdfAcc=df[df['ACC'].str.match(search_str)]\r\n\t\t\t\ttotal_dfAcc=dfAcc.shape[0]\r\n\t\t\t\tprint('Using KDC6 as search keyword')\r\n\t\t\telse:\r\n\t\t\t\tprint('Continue......................')\r\n\t\t\t\r\n\t\t\t# maxstopper = total_dfAcc - 1\r\n\t\t\t# import IPython;IPython.embed()\r\n\r\n\r\n\t\t\th=0\r\n\t\t\tp=0\t\t\r\n\t\t\tfor h in range(total_dfAcc):\r\n\t\t\t\ttipe = dfAcc.iloc[[h]].TIPE.values[0]\r\n\t\t\t\tx1 = dfAcc.iloc[[h]].Xmin.values[0]\r\n\t\t\t\ty1 = dfAcc.iloc[[h]].Ymin.values[0]\r\n\t\t\t\tx2 = dfAcc.iloc[[h]].Xmax.values[0]\r\n\t\t\t\ty2 = dfAcc.iloc[[h]].Ymax.values[0]\r\n\t\t\t\tz1_slice = dfAcc.iloc[[h]].Zt_minsplitnum.values[0]\r\n\t\t\t\tz1 = dfAcc.iloc[[h]].Zt_minsplit_rev.values[0]\r\n\t\t\t\tz2_slice = dfAcc.iloc[[h]].Zt_maxsplitnum.values[0]\r\n\t\t\t\tz2 = dfAcc.iloc[[h]].Zt_maxsplit_rev.values[0]\r\n\t\t\t\tbox_size = dfAcc.iloc[[h]].box_size.values[0]\r\n\r\n\t\t\t\tz1_order = str('%0*d' % (3, z1_slice))\r\n\t\t\t\tz2_order = str('%0*d' % (3, z2_slice))\r\n\r\n\t\t\t\tx1 = x1*ratio\r\n\t\t\t\ty1 = y1*ratio\r\n\t\t\t\tx2 = x2*ratio\r\n\t\t\t\ty2 = y2*ratio\r\n\t\t\t\tbox_size=box_size*ratio\r\n\t\t\t\t\r\n\t\t\t\t\r\n\r\n\t\t\t\t# import IPython;IPython.embed()\r\n \t\t\t\t\r\n\t\t\t\tif (x1==0 and x2==0 and y1 ==0 and y2==0) or box_size<32:\r\n\t\t\t\t\tprint ('\\tNo annotation or NODULE too small (<32pix) = '+str(all_dirs[dir_it])+'.......')\r\n\t\t\t\t\tannot_file_path = 'HDF5_File/'+str(str(all_dirs[dir_it])+'_'+str(z1_order))\r\n\t\t\t\t\t# csv_file.write(str(annot_file_path)+','','','','','','',''\\n')\r\n\t\t\t\t\thdf5_name=str(str(all_dirs[dir_it])+'_'+str(z1_order))\r\n\t\t\t\t\tprint(hdf5_name+\" isn't made\")\r\n\t\t\t\t\th = h+1\r\n\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\tif z1_slice==z2_slice:\r\n\t\t\t\t\t\t## Printing 
annotation for z1 & z2\r\n\t\t\t\t\t\tprint ('\\tACC and z1, z2 order match found = '+str(search_str)+'.......')\r\n\t\t\t\t\t\tannot_file_path = 'HDF5_File/'+str(str(all_dirs[dir_it])+'_'+str(z1_order))\r\n\t\t\t\t\t\tcsv_file.write(str(annot_file_path)+','+str(x1)+','+str(y1)+','+str(x2)+','+str(y2)+','+str(z1)+','+str(z2)+','+str(tipe)+'\\n')\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t## Write hdf5 file for z1 & z2\r\n\r\n\t\t\t\t\t\thdf5_name=str(str(all_dirs[dir_it])+'_'+str(z1_order))\r\n\t\t\t\t\t\thdf5_path = '/media/ivanwilliam/Ivan_HDD_2TB/i3d_hdf5_lung_data_resized_4x/HDF5_file/'+str(hdf5_name)+'.h5'\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\texists = os.path.isfile(hdf5_path)\r\n\t\t\t\t\t\tif exists:\r\n\t\t\t\t\t\t\tprint(hdf5_name+\" already exists. \\n Continue to next hdf5 files .................\")\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\thdf_file = h5py.File(hdf5_path, 'w')\r\n\t\t\t\t\t\t\tmatrix123 = hdf32_list[z1_slice-1]\r\n\t\t\t\t\t\t\thdf_file.create_dataset(name='dataset', data=matrix123)\r\n\t\t\t\t\t\t\thdf_check=h5py.File(hdf5_path, 'r')\r\n\t\t\t\t\t\t\tbase_items = list (hdf_check.items())\r\n\t\t\t\t\t\t\tprint (\"HDF5_file at \"+str(hdf5_path)+\" which contain: \"+str(base_items)+\" successfully created\")\r\n\t\t\t\t\t\t\thdf_file.close()\r\n\t\t\t\t\r\n\t\t\t\t\t\tp = p+1\r\n\t\t\t\t\t\th = h+1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tif z2_slice>z1_slice:\r\n\t\t\t\t\t\t\tprint ('\\tz2 and z1 slice position differ, splitting it into 2 file and annotation..........')\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t## Printing annotation for z1\r\n\t\t\t\t\t\t\tprint ('\\tACC and z1 match found = '+str(search_str)+'.......')\r\n\t\t\t\t\t\t\tannot_file_path = 'HDF5_File/'+str(str(all_dirs[dir_it])+'_'+str(z1_order))\r\n\t\t\t\t\t\t\tcsv_file.write(str(annot_file_path)+','+str(x1)+','+str(y1)+','+str(x2)+','+str(y2)+','+str(z1)+','+'32'+','+str(tipe)+'\\n')\r\n\r\n\t\t\t\t\t\t\t## Printing annotation for z2\r\n\t\t\t\t\t\t\tprint ('\\tACC and z2 match found = '+str(search_str)+'.......')\r\n\t\t\t\t\t\t\tannot_file_path = 'HDF5_File/'+str(str(all_dirs[dir_it])+'_'+str(z2_order))\r\n\t\t\t\t\t\t\tcsv_file.write(str(annot_file_path)+','+str(x1)+','+str(y1)+','+str(x2)+','+str(y2)+','+'1'+','+str(z2)+','+str(tipe)+'\\n')\r\n\r\n\r\n\t\t\t\t\t\t\t## Write hdf5 file for z1\r\n\t\t\t\t\t\t\thdf5_name=str(str(all_dirs[dir_it])+'_'+str(z1_order))\r\n\t\t\t\t\t\t\thdf5_path = '/media/ivanwilliam/Ivan_HDD_2TB/i3d_hdf5_lung_data_resized_4x/HDF5_file/'+str(hdf5_name)+'.h5'\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\texists = os.path.isfile(hdf5_path)\r\n\t\t\t\t\t\t\tif exists:\r\n\t\t\t\t\t\t\t\tprint(hdf5_name+\" already exists. 
\\n Continue to next hdf5 files .................\")\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\thdf_file = h5py.File(hdf5_path, 'w')\r\n\t\t\t\t\t\t\t\tmatrix123 = hdf32_list[z1_slice-1]\r\n\t\t\t\t\t\t\t\thdf_file.create_dataset(name='dataset', data=matrix123)\r\n\t\t\t\t\t\t\t\thdf_check=h5py.File(hdf5_path, 'r')\r\n\t\t\t\t\t\t\t\tbase_items = list (hdf_check.items())\r\n\t\t\t\t\t\t\t\tprint (\"HDF5_file at \"+str(hdf5_path)+\" which contain: \"+str(base_items)+\" successfully created\")\r\n\t\t\t\t\t\t\t\thdf_file.close()\r\n\r\n\t\t\t\t\t\t\t## Write hdf5 file for z2\r\n\t\t\t\t\t\t\thdf5_name=str(str(all_dirs[dir_it])+'_'+str(z2_order))\r\n\t\t\t\t\t\t\thdf5_path = '/media/ivanwilliam/Ivan_HDD_2TB/i3d_hdf5_lung_data_resized_4x/HDF5_file/'+str(hdf5_name)+'.h5'\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\texists = os.path.isfile(hdf5_path)\r\n\t\t\t\t\t\t\tif exists:\r\n\t\t\t\t\t\t\t\tprint(hdf5_name+\" already exists. \\n Continue to next hdf5 files .................\")\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\thdf_file = h5py.File(hdf5_path, 'w')\r\n\t\t\t\t\t\t\t\tmatrix123 = hdf32_list[z2_slice-1]\r\n\t\t\t\t\t\t\t\thdf_file.create_dataset(name='dataset', data=matrix123)\r\n\t\t\t\t\t\t\t\thdf_check=h5py.File(hdf5_path, 'r')\r\n\t\t\t\t\t\t\t\tbase_items = list (hdf_check.items())\r\n\t\t\t\t\t\t\t\tprint (\"HDF5_file at \"+str(hdf5_path)+\" which contain: \"+str(base_items)+\" successfully created\")\r\n\t\t\t\t\t\t\t\thdf_file.close()\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tp = p+1\r\n\t\t\t\t\t\t\th = h+1\r\n\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\th = h+1\r\n\t\t\tprint('\\tThere are %d new annotations added' %(p))\r\n\t\t\r\n\t\t\r\n\t\tk = 0\r\n\t\th = 0\r\n\t\ti = 1\r\n\t\thdf32_list=[]\r\n\t\tpics32_list=[]\r\n\t\tpics32_array=[]\r\n\t\tdir_it=dir_it + 1"
] | [
[
"pandas.read_csv",
"numpy.stack",
"numpy.array"
]
] |
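The converter in the row above windows each directory of slices into 64-image batches, keeps every other slice to build a (32, height, width, 3) block, and writes each block as a single HDF5 dataset named 'dataset'. The core stack/write/read round trip, with deliberately small illustrative shapes (the script itself resizes every slice to 2048x2048):

import numpy as np
import h5py

# 32 slices with a small stand-in shape; the script's real shape is (2048, 2048, 3).
slices = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(32)]
block = np.stack(slices, axis=0)  # shape (32, 64, 64, 3)
with h5py.File('example_block.h5', 'w') as hf:
    hf.create_dataset(name='dataset', data=block)
with h5py.File('example_block.h5', 'r') as hf:  # verify, then auto-close
    assert hf['dataset'].shape == (32, 64, 64, 3)

Using context managers here also closes the verification handle; the script opens hdf_check for reading but only ever closes hdf_file.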
theendsofinvention/cartoonify | [
"39ea84d96b3e93f0480e6d6158bea506d01278ca"
] | [
"cartoonify/app/object_detection/core/box_predictor.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Box predictor for object detectors.\n\nBox predictors are classes that take a high level\nimage feature map as input and produce two predictions,\n(1) a tensor encoding box locations, and\n(2) a tensor encoding classes for each box.\n\nThese components are passed directly to loss functions\nin our detection models.\n\nThese modules are separated from the main model since the same\nfew box predictor architectures are shared across many models.\n\"\"\"\nfrom abc import abstractmethod\nimport tensorflow as tf\nfrom app.object_detection.utils import ops\nfrom app.object_detection.utils import shape_utils\nfrom app.object_detection.utils import static_shape\n\nslim = tf.contrib.slim\n\nBOX_ENCODINGS = 'box_encodings'\nCLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'\nMASK_PREDICTIONS = 'mask_predictions'\n\n\nclass BoxPredictor(object):\n \"\"\"BoxPredictor.\"\"\"\n\n def __init__(self, is_training, num_classes):\n \"\"\"Constructor.\n\n Args:\n is_training: Indicates whether the BoxPredictor is in training mode.\n num_classes: number of classes. Note that num_classes *does not*\n include the background category, so if groundtruth labels take values\n in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the\n assigned classification targets can range from {0,... 
K}).\n \"\"\"\n self._is_training = is_training\n self._num_classes = num_classes\n\n @property\n def num_classes(self):\n return self._num_classes\n\n def predict(self, image_features, num_predictions_per_location, scope,\n **params):\n \"\"\"Computes encoded object locations and corresponding confidences.\n\n Takes a high level image feature map as input and produce two predictions,\n (1) a tensor encoding box locations, and\n (2) a tensor encoding class scores for each corresponding box.\n In this interface, we only assume that two tensors are returned as output\n and do not assume anything about their shapes.\n\n Args:\n image_features: A float tensor of shape [batch_size, height, width,\n channels] containing features for a batch of images.\n num_predictions_per_location: an integer representing the number of box\n predictions to be made per spatial location in the feature map.\n scope: Variable and Op scope name.\n **params: Additional keyword arguments for specific implementations of\n BoxPredictor.\n\n Returns:\n A dictionary containing at least the following tensors.\n box_encodings: A float tensor of shape\n [batch_size, num_anchors, q, code_size] representing the location of\n the objects, where q is 1 or the number of classes.\n class_predictions_with_background: A float tensor of shape\n [batch_size, num_anchors, num_classes + 1] representing the class\n predictions for the proposals.\n \"\"\"\n with tf.variable_scope(scope):\n return self._predict(image_features, num_predictions_per_location,\n **params)\n\n # TODO: num_predictions_per_location could be moved to constructor.\n # This is currently only used by ConvolutionalBoxPredictor.\n @abstractmethod\n def _predict(self, image_features, num_predictions_per_location, **params):\n \"\"\"Implementations must override this method.\n\n Args:\n image_features: A float tensor of shape [batch_size, height, width,\n channels] containing features for a batch of images.\n num_predictions_per_location: an integer representing the number of box\n predictions to be made per spatial location in the feature map.\n **params: Additional keyword arguments for specific implementations of\n BoxPredictor.\n\n Returns:\n A dictionary containing at least the following tensors.\n box_encodings: A float tensor of shape\n [batch_size, num_anchors, q, code_size] representing the location of\n the objects, where q is 1 or the number of classes.\n class_predictions_with_background: A float tensor of shape\n [batch_size, num_anchors, num_classes + 1] representing the class\n predictions for the proposals.\n \"\"\"\n pass\n\n\nclass RfcnBoxPredictor(BoxPredictor):\n \"\"\"RFCN Box Predictor.\n\n Applies a position sensitve ROI pooling on position sensitive feature maps to\n predict classes and refined locations. See https://arxiv.org/abs/1605.06409\n for details.\n\n This is used for the second stage of the RFCN meta architecture. Notice that\n locations are *not* shared across classes, thus for each anchor, a separate\n prediction is made for each class.\n \"\"\"\n\n def __init__(self,\n is_training,\n num_classes,\n conv_hyperparams,\n num_spatial_bins,\n depth,\n crop_size,\n box_code_size):\n \"\"\"Constructor.\n\n Args:\n is_training: Indicates whether the BoxPredictor is in training mode.\n num_classes: number of classes. Note that num_classes *does not*\n include the background category, so if groundtruth labels take values\n in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the\n assigned classification targets can range from {0,... 
K}).\n conv_hyperparams: Slim arg_scope with hyperparameters for conolutional\n layers.\n num_spatial_bins: A list of two integers `[spatial_bins_y,\n spatial_bins_x]`.\n depth: Target depth to reduce the input feature maps to.\n crop_size: A list of two integers `[crop_height, crop_width]`.\n box_code_size: Size of encoding for each box.\n \"\"\"\n super(RfcnBoxPredictor, self).__init__(is_training, num_classes)\n self._conv_hyperparams = conv_hyperparams\n self._num_spatial_bins = num_spatial_bins\n self._depth = depth\n self._crop_size = crop_size\n self._box_code_size = box_code_size\n\n @property\n def num_classes(self):\n return self._num_classes\n\n def _predict(self, image_features, num_predictions_per_location,\n proposal_boxes):\n \"\"\"Computes encoded object locations and corresponding confidences.\n\n Args:\n image_features: A float tensor of shape [batch_size, height, width,\n channels] containing features for a batch of images.\n num_predictions_per_location: an integer representing the number of box\n predictions to be made per spatial location in the feature map.\n Currently, this must be set to 1, or an error will be raised.\n proposal_boxes: A float tensor of shape [batch_size, num_proposals,\n box_code_size].\n\n Returns:\n box_encodings: A float tensor of shape\n [batch_size, 1, num_classes, code_size] representing the\n location of the objects.\n class_predictions_with_background: A float tensor of shape\n [batch_size, 1, num_classes + 1] representing the class\n predictions for the proposals.\n Raises:\n ValueError: if num_predictions_per_location is not 1.\n \"\"\"\n if num_predictions_per_location != 1:\n raise ValueError('Currently RfcnBoxPredictor only supports '\n 'predicting a single box per class per location.')\n\n batch_size = tf.shape(proposal_boxes)[0]\n num_boxes = tf.shape(proposal_boxes)[1]\n def get_box_indices(proposals):\n proposals_shape = proposals.get_shape().as_list()\n if any(dim is None for dim in proposals_shape):\n proposals_shape = tf.shape(proposals)\n ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)\n multiplier = tf.expand_dims(\n tf.range(start=0, limit=proposals_shape[0]), 1)\n return tf.reshape(ones_mat * multiplier, [-1])\n\n net = image_features\n with slim.arg_scope(self._conv_hyperparams):\n net = slim.conv2d(net, self._depth, [1, 1], scope='reduce_depth')\n # Location predictions.\n location_feature_map_depth = (self._num_spatial_bins[0] *\n self._num_spatial_bins[1] *\n self.num_classes *\n self._box_code_size)\n location_feature_map = slim.conv2d(net, location_feature_map_depth,\n [1, 1], activation_fn=None,\n scope='refined_locations')\n box_encodings = ops.position_sensitive_crop_regions(\n location_feature_map,\n boxes=tf.reshape(proposal_boxes, [-1, self._box_code_size]),\n box_ind=get_box_indices(proposal_boxes),\n crop_size=self._crop_size,\n num_spatial_bins=self._num_spatial_bins,\n global_pool=True)\n box_encodings = tf.squeeze(box_encodings, squeeze_dims=[1, 2])\n box_encodings = tf.reshape(box_encodings,\n [batch_size * num_boxes, 1, self.num_classes,\n self._box_code_size])\n\n # Class predictions.\n total_classes = self.num_classes + 1 # Account for background class.\n class_feature_map_depth = (self._num_spatial_bins[0] *\n self._num_spatial_bins[1] *\n total_classes)\n class_feature_map = slim.conv2d(net, class_feature_map_depth, [1, 1],\n activation_fn=None,\n scope='class_predictions')\n class_predictions_with_background = ops.position_sensitive_crop_regions(\n class_feature_map,\n 
boxes=tf.reshape(proposal_boxes, [-1, self._box_code_size]),\n box_ind=get_box_indices(proposal_boxes),\n crop_size=self._crop_size,\n num_spatial_bins=self._num_spatial_bins,\n global_pool=True)\n class_predictions_with_background = tf.squeeze(\n class_predictions_with_background, squeeze_dims=[1, 2])\n class_predictions_with_background = tf.reshape(\n class_predictions_with_background,\n [batch_size * num_boxes, 1, total_classes])\n\n return {BOX_ENCODINGS: box_encodings,\n CLASS_PREDICTIONS_WITH_BACKGROUND:\n class_predictions_with_background}\n\n\nclass MaskRCNNBoxPredictor(BoxPredictor):\n \"\"\"Mask R-CNN Box Predictor.\n\n See Mask R-CNN: He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017).\n Mask R-CNN. arXiv preprint arXiv:1703.06870.\n\n This is used for the second stage of the Mask R-CNN detector where proposals\n cropped from an image are arranged along the batch dimension of the input\n image_features tensor. Notice that locations are *not* shared across classes,\n thus for each anchor, a separate prediction is made for each class.\n\n In addition to predicting boxes and classes, optionally this class allows\n predicting masks and/or keypoints inside detection boxes.\n\n Currently this box predictor makes per-class predictions; that is, each\n anchor makes a separate box prediction for each class.\n \"\"\"\n\n def __init__(self,\n is_training,\n num_classes,\n fc_hyperparams,\n use_dropout,\n dropout_keep_prob,\n box_code_size,\n conv_hyperparams=None,\n predict_instance_masks=False,\n mask_height=14,\n mask_width=14,\n mask_prediction_conv_depth=256,\n predict_keypoints=False):\n \"\"\"Constructor.\n\n Args:\n is_training: Indicates whether the BoxPredictor is in training mode.\n num_classes: number of classes. Note that num_classes *does not*\n include the background category, so if groundtruth labels take values\n in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the\n assigned classification targets can range from {0,... K}).\n fc_hyperparams: Slim arg_scope with hyperparameters for fully\n connected ops.\n use_dropout: Option to use dropout or not. Note that a single dropout\n op is applied here prior to both box and class predictions, which stands\n in contrast to the ConvolutionalBoxPredictor below.\n dropout_keep_prob: Keep probability for dropout.\n This is only used if use_dropout is True.\n box_code_size: Size of encoding for each box.\n conv_hyperparams: Slim arg_scope with hyperparameters for convolution\n ops.\n predict_instance_masks: Whether to predict object masks inside detection\n boxes.\n mask_height: Desired output mask height. The default value is 14.\n mask_width: Desired output mask width. 
The default value is 14.\n mask_prediction_conv_depth: The depth for the first conv2d_transpose op\n applied to the image_features in the mask prediciton branch.\n predict_keypoints: Whether to predict keypoints insde detection boxes.\n\n\n Raises:\n ValueError: If predict_instance_masks or predict_keypoints is true.\n \"\"\"\n super(MaskRCNNBoxPredictor, self).__init__(is_training, num_classes)\n self._fc_hyperparams = fc_hyperparams\n self._use_dropout = use_dropout\n self._box_code_size = box_code_size\n self._dropout_keep_prob = dropout_keep_prob\n self._conv_hyperparams = conv_hyperparams\n self._predict_instance_masks = predict_instance_masks\n self._mask_height = mask_height\n self._mask_width = mask_width\n self._mask_prediction_conv_depth = mask_prediction_conv_depth\n self._predict_keypoints = predict_keypoints\n if self._predict_keypoints:\n raise ValueError('Keypoint prediction is unimplemented.')\n if ((self._predict_instance_masks or self._predict_keypoints) and\n self._conv_hyperparams is None):\n raise ValueError('`conv_hyperparams` must be provided when predicting '\n 'masks.')\n\n @property\n def num_classes(self):\n return self._num_classes\n\n def _predict(self, image_features, num_predictions_per_location):\n \"\"\"Computes encoded object locations and corresponding confidences.\n\n Flattens image_features and applies fully connected ops (with no\n non-linearity) to predict box encodings and class predictions. In this\n setting, anchors are not spatially arranged in any way and are assumed to\n have been folded into the batch dimension. Thus we output 1 for the\n anchors dimension.\n\n Also optionally predicts instance masks.\n The mask prediction head is based on the Mask RCNN paper with the following\n modifications: We replace the deconvolution layer with a bilinear resize\n and a convolution.\n\n Args:\n image_features: A float tensor of shape [batch_size, height, width,\n channels] containing features for a batch of images.\n num_predictions_per_location: an integer representing the number of box\n predictions to be made per spatial location in the feature map.\n Currently, this must be set to 1, or an error will be raised.\n\n Returns:\n A dictionary containing the following tensors.\n box_encodings: A float tensor of shape\n [batch_size, 1, num_classes, code_size] representing the\n location of the objects.\n class_predictions_with_background: A float tensor of shape\n [batch_size, 1, num_classes + 1] representing the class\n predictions for the proposals.\n If predict_masks is True the dictionary also contains:\n instance_masks: A float tensor of shape\n [batch_size, 1, num_classes, image_height, image_width]\n If predict_keypoints is True the dictionary also contains:\n keypoints: [batch_size, 1, num_keypoints, 2]\n\n Raises:\n ValueError: if num_predictions_per_location is not 1.\n \"\"\"\n if num_predictions_per_location != 1:\n raise ValueError('Currently FullyConnectedBoxPredictor only supports '\n 'predicting a single box per class per location.')\n spatial_averaged_image_features = tf.reduce_mean(image_features, [1, 2],\n keep_dims=True,\n name='AvgPool')\n flattened_image_features = slim.flatten(spatial_averaged_image_features)\n if self._use_dropout:\n flattened_image_features = slim.dropout(flattened_image_features,\n keep_prob=self._dropout_keep_prob,\n is_training=self._is_training)\n with slim.arg_scope(self._fc_hyperparams):\n box_encodings = slim.fully_connected(\n flattened_image_features,\n self._num_classes * self._box_code_size,\n 
activation_fn=None,\n scope='BoxEncodingPredictor')\n class_predictions_with_background = slim.fully_connected(\n flattened_image_features,\n self._num_classes + 1,\n activation_fn=None,\n scope='ClassPredictor')\n box_encodings = tf.reshape(\n box_encodings, [-1, 1, self._num_classes, self._box_code_size])\n class_predictions_with_background = tf.reshape(\n class_predictions_with_background, [-1, 1, self._num_classes + 1])\n\n predictions_dict = {\n BOX_ENCODINGS: box_encodings,\n CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background\n }\n\n if self._predict_instance_masks:\n with slim.arg_scope(self._conv_hyperparams):\n upsampled_features = tf.image.resize_bilinear(\n image_features,\n [self._mask_height, self._mask_width],\n align_corners=True)\n upsampled_features = slim.conv2d(\n upsampled_features,\n num_outputs=self._mask_prediction_conv_depth,\n kernel_size=[2, 2])\n mask_predictions = slim.conv2d(upsampled_features,\n num_outputs=self.num_classes,\n activation_fn=None,\n kernel_size=[3, 3])\n instance_masks = tf.expand_dims(tf.transpose(mask_predictions,\n perm=[0, 3, 1, 2]),\n axis=1,\n name='MaskPredictor')\n predictions_dict[MASK_PREDICTIONS] = instance_masks\n return predictions_dict\n\n\nclass ConvolutionalBoxPredictor(BoxPredictor):\n \"\"\"Convolutional Box Predictor.\n\n Optionally add an intermediate 1x1 convolutional layer after features and\n predict in parallel branches box_encodings and\n class_predictions_with_background.\n\n Currently this box predictor assumes that predictions are \"shared\" across\n classes --- that is each anchor makes box predictions which do not depend\n on class.\n \"\"\"\n\n def __init__(self,\n is_training,\n num_classes,\n conv_hyperparams,\n min_depth,\n max_depth,\n num_layers_before_predictor,\n use_dropout,\n dropout_keep_prob,\n kernel_size,\n box_code_size,\n apply_sigmoid_to_scores=False,\n class_prediction_bias_init=0.0):\n \"\"\"Constructor.\n\n Args:\n is_training: Indicates whether the BoxPredictor is in training mode.\n num_classes: number of classes. Note that num_classes *does not*\n include the background category, so if groundtruth labels take values\n in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the\n assigned classification targets can range from {0,... K}).\n conv_hyperparams: Slim arg_scope with hyperparameters for convolution ops.\n min_depth: Minumum feature depth prior to predicting box encodings\n and class predictions.\n max_depth: Maximum feature depth prior to predicting box encodings\n and class predictions. If max_depth is set to 0, no additional\n feature map will be inserted before location and class predictions.\n num_layers_before_predictor: Number of the additional conv layers before\n the predictor.\n use_dropout: Option to use dropout for class prediction or not.\n dropout_keep_prob: Keep probability for dropout.\n This is only used if use_dropout is True.\n kernel_size: Size of final convolution kernel. 
If the\n spatial resolution of the feature map is smaller than the kernel size,\n then the kernel size is automatically set to be\n min(feature_width, feature_height).\n box_code_size: Size of encoding for each box.\n apply_sigmoid_to_scores: if True, apply the sigmoid on the output\n class_predictions.\n class_prediction_bias_init: constant value to initialize bias of the last\n conv2d layer before class prediction.\n\n Raises:\n ValueError: if min_depth > max_depth.\n \"\"\"\n super(ConvolutionalBoxPredictor, self).__init__(is_training, num_classes)\n if min_depth > max_depth:\n raise ValueError('min_depth should be less than or equal to max_depth')\n self._conv_hyperparams = conv_hyperparams\n self._min_depth = min_depth\n self._max_depth = max_depth\n self._num_layers_before_predictor = num_layers_before_predictor\n self._use_dropout = use_dropout\n self._kernel_size = kernel_size\n self._box_code_size = box_code_size\n self._dropout_keep_prob = dropout_keep_prob\n self._apply_sigmoid_to_scores = apply_sigmoid_to_scores\n self._class_prediction_bias_init = class_prediction_bias_init\n\n def _predict(self, image_features, num_predictions_per_location):\n \"\"\"Computes encoded object locations and corresponding confidences.\n\n Args:\n image_features: A float tensor of shape [batch_size, height, width,\n channels] containing features for a batch of images.\n num_predictions_per_location: an integer representing the number of box\n predictions to be made per spatial location in the feature map.\n\n Returns:\n A dictionary containing the following tensors.\n box_encodings: A float tensor of shape [batch_size, num_anchors, 1,\n code_size] representing the location of the objects, where\n num_anchors = feat_height * feat_width * num_predictions_per_location\n class_predictions_with_background: A float tensor of shape\n [batch_size, num_anchors, num_classes + 1] representing the class\n predictions for the proposals.\n \"\"\"\n # Add a slot for the background class.\n num_class_slots = self.num_classes + 1\n net = image_features\n with slim.arg_scope(self._conv_hyperparams), \\\n slim.arg_scope([slim.dropout], is_training=self._is_training):\n # Add additional conv layers before the class predictor.\n features_depth = static_shape.get_depth(image_features.get_shape())\n depth = max(min(features_depth, self._max_depth), self._min_depth)\n tf.logging.info('depth of additional conv before box predictor: {}'.\n format(depth))\n if depth > 0 and self._num_layers_before_predictor > 0:\n for i in range(self._num_layers_before_predictor):\n net = slim.conv2d(\n net, depth, [1, 1], scope='Conv2d_%d_1x1_%d' % (i, depth))\n with slim.arg_scope([slim.conv2d], activation_fn=None,\n normalizer_fn=None, normalizer_params=None):\n box_encodings = slim.conv2d(\n net, num_predictions_per_location * self._box_code_size,\n [self._kernel_size, self._kernel_size],\n scope='BoxEncodingPredictor')\n if self._use_dropout:\n net = slim.dropout(net, keep_prob=self._dropout_keep_prob)\n class_predictions_with_background = slim.conv2d(\n net, num_predictions_per_location * num_class_slots,\n [self._kernel_size, self._kernel_size], scope='ClassPredictor',\n biases_initializer=tf.constant_initializer(\n self._class_prediction_bias_init))\n if self._apply_sigmoid_to_scores:\n class_predictions_with_background = tf.sigmoid(\n class_predictions_with_background)\n\n combined_feature_map_shape = shape_utils.combined_static_and_dynamic_shape(\n image_features)\n box_encodings = tf.reshape(\n box_encodings, 
tf.stack([combined_feature_map_shape[0],\n combined_feature_map_shape[1] *\n combined_feature_map_shape[2] *\n num_predictions_per_location,\n 1, self._box_code_size]))\n class_predictions_with_background = tf.reshape(\n class_predictions_with_background,\n tf.stack([combined_feature_map_shape[0],\n combined_feature_map_shape[1] *\n combined_feature_map_shape[2] *\n num_predictions_per_location,\n num_class_slots]))\n return {BOX_ENCODINGS: box_encodings,\n CLASS_PREDICTIONS_WITH_BACKGROUND:\n class_predictions_with_background}\n"
] | [
[
"tensorflow.stack",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.constant_initializer",
"tensorflow.range",
"tensorflow.sigmoid",
"tensorflow.ones",
"tensorflow.reduce_mean",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.image.resize_bilinear",
"tensorflow.transpose"
]
] |
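In ConvolutionalBoxPredictor._predict above, the box head emits a [batch, H, W, num_predictions_per_location * box_code_size] feature map and reshapes it to [batch, H*W*num_predictions_per_location, 1, box_code_size], while the class head reserves one extra channel slot per prediction for the background class. The same shape bookkeeping in plain NumPy — sizes are made up for illustration; the real code does this with tf.reshape over combined_static_and_dynamic_shape:

import numpy as np

batch, H, W = 2, 5, 5
preds_per_loc, code_size, num_classes = 3, 4, 20

# Box head: channels = preds_per_loc * code_size; C-order reshape groups the
# channel axis as (prediction, code), matching TF's row-major tf.reshape.
conv_out = np.random.randn(batch, H, W, preds_per_loc * code_size).astype(np.float32)
box_encodings = conv_out.reshape(batch, H * W * preds_per_loc, 1, code_size)
assert box_encodings.shape == (2, 75, 1, 4)

# Class head: num_class_slots = num_classes + 1 to account for background.
num_class_slots = num_classes + 1
cls_out = np.random.randn(batch, H, W, preds_per_loc * num_class_slots).astype(np.float32)
cls_preds = cls_out.reshape(batch, H * W * preds_per_loc, num_class_slots)
assert cls_preds.shape == (2, 75, 21)

So num_anchors = H * W * num_predictions_per_location, and the singleton third axis on box_encodings is the q = 1 "shared across classes" case described in the predictor's docstring.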
afeedh/facenet | [
"a70159a7c9850a49acd789824273b9b8933a61e8"
] | [
"facenet/src/train_tripletloss.py"
] | [
"\"\"\"Training a face recognizer with TensorFlow based on the FaceNet paper\nFaceNet: A Unified Embedding for Face Recognition and Clustering: http://arxiv.org/abs/1503.03832\n\"\"\"\n# MIT License\n#\n# Copyright (c) 2016 David Sandberg\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport os.path\nimport time\nimport sys\nimport tensorflow as tf\nimport numpy as np\nimport importlib\nimport itertools\nimport argparse\nimport facenet.src.facenet as fc\nfrom facenet.src import lfw\n\nfrom tensorflow.python.ops import data_flow_ops\n\nfrom six.moves import xrange # @UnresolvedImport\n\n\ndef main(args):\n\n network = importlib.import_module(args.model_def)\n\n subdir = datetime.strftime(datetime.now(), \"%Y%m%d-%H%M%S\")\n log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)\n if not os.path.isdir(log_dir): # Create the log directory if it doesn't exist\n os.makedirs(log_dir)\n model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)\n if not os.path.isdir(model_dir): # Create the model directory if it doesn't exist\n os.makedirs(model_dir)\n\n # Write arguments to a text file\n fc.write_arguments_to_file(args, os.path.join(log_dir, \"arguments.txt\"))\n\n # Store some git revision info in a text file in the log directory\n src_path, _ = os.path.split(os.path.realpath(__file__))\n fc.store_revision_info(src_path, log_dir, \" \".join(sys.argv))\n\n np.random.seed(seed=args.seed)\n train_set = fc.get_dataset(args.data_dir)\n\n print(\"Model directory: %s\" % model_dir)\n print(\"Log directory: %s\" % log_dir)\n if args.pretrained_model:\n print(\"Pre-trained model: %s\" % os.path.expanduser(args.pretrained_model))\n\n if args.lfw_dir:\n print(\"LFW directory: %s\" % args.lfw_dir)\n # Read the file containing the pairs used for testing\n pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))\n # Get the paths for the corresponding images\n lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)\n\n with tf.Graph().as_default():\n tf.set_random_seed(args.seed)\n global_step = tf.Variable(0, trainable=False)\n\n # Placeholder for the learning rate\n learning_rate_placeholder = tf.placeholder(tf.float32, name=\"learning_rate\")\n\n batch_size_placeholder = tf.placeholder(tf.int32, name=\"batch_size\")\n\n phase_train_placeholder = tf.placeholder(tf.bool, 
name=\"phase_train\")\n\n image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 3), name=\"image_paths\")\n labels_placeholder = tf.placeholder(tf.int64, shape=(None, 3), name=\"labels\")\n\n input_queue = data_flow_ops.FIFOQueue(\n capacity=100000, dtypes=[tf.string, tf.int64], shapes=[(3,), (3,)], shared_name=None, name=None\n )\n enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder])\n\n nrof_preprocess_threads = 4\n images_and_labels = []\n for _ in range(nrof_preprocess_threads):\n filenames, label = input_queue.dequeue()\n images = []\n for filename in tf.unstack(filenames):\n file_contents = tf.read_file(filename)\n image = tf.image.decode_image(file_contents, channels=3)\n\n if args.random_crop:\n image = tf.random_crop(image, [args.image_size, args.image_size, 3])\n else:\n image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)\n if args.random_flip:\n image = tf.image.random_flip_left_right(image)\n\n # pylint: disable=no-member\n image.set_shape((args.image_size, args.image_size, 3))\n images.append(tf.image.per_image_standardization(image))\n images_and_labels.append([images, label])\n\n image_batch, labels_batch = tf.train.batch_join(\n images_and_labels,\n batch_size=batch_size_placeholder,\n shapes=[(args.image_size, args.image_size, 3), ()],\n enqueue_many=True,\n capacity=4 * nrof_preprocess_threads * args.batch_size,\n allow_smaller_final_batch=True,\n )\n image_batch = tf.identity(image_batch, \"image_batch\")\n image_batch = tf.identity(image_batch, \"input\")\n labels_batch = tf.identity(labels_batch, \"label_batch\")\n\n # Build the inference graph\n prelogits, _ = network.inference(\n image_batch,\n args.keep_probability,\n phase_train=phase_train_placeholder,\n bottleneck_layer_size=args.embedding_size,\n weight_decay=args.weight_decay,\n )\n\n embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name=\"embeddings\")\n # Split embeddings into anchor, positive and negative and calculate triplet loss\n anchor, positive, negative = tf.unstack(tf.reshape(embeddings, [-1, 3, args.embedding_size]), 3, 1)\n triplet_loss = fc.triplet_loss(anchor, positive, negative, args.alpha)\n\n learning_rate = tf.train.exponential_decay(\n learning_rate_placeholder,\n global_step,\n args.learning_rate_decay_epochs * args.epoch_size,\n args.learning_rate_decay_factor,\n staircase=True,\n )\n tf.summary.scalar(\"learning_rate\", learning_rate)\n\n # Calculate the total losses\n regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n total_loss = tf.add_n([triplet_loss] + regularization_losses, name=\"total_loss\")\n\n # Build a Graph that trains the model with one batch of examples and updates the model parameters\n train_op = fc.train(\n total_loss, global_step, args.optimizer, learning_rate, args.moving_average_decay, tf.global_variables()\n )\n\n # Create a saver\n saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.summary.merge_all()\n\n # Start running operations on the Graph.\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n # Initialize variables\n sess.run(tf.global_variables_initializer(), feed_dict={phase_train_placeholder: True})\n sess.run(tf.local_variables_initializer(), feed_dict={phase_train_placeholder: True})\n\n summary_writer = tf.summary.FileWriter(log_dir, 
sess.graph)\n coord = tf.train.Coordinator()\n tf.train.start_queue_runners(coord=coord, sess=sess)\n\n with sess.as_default():\n\n if args.pretrained_model:\n print(\"Restoring pretrained model: %s\" % args.pretrained_model)\n saver.restore(sess, os.path.expanduser(args.pretrained_model))\n\n # Training and validation loop\n epoch = 0\n while epoch < args.max_nrof_epochs:\n step = sess.run(global_step, feed_dict=None)\n epoch = step // args.epoch_size\n # Train for one epoch\n train(\n args,\n sess,\n train_set,\n epoch,\n image_paths_placeholder,\n labels_placeholder,\n labels_batch,\n batch_size_placeholder,\n learning_rate_placeholder,\n phase_train_placeholder,\n enqueue_op,\n input_queue,\n global_step,\n embeddings,\n total_loss,\n train_op,\n summary_op,\n summary_writer,\n args.learning_rate_schedule_file,\n args.embedding_size,\n anchor,\n positive,\n negative,\n triplet_loss,\n )\n\n # Save variables and the metagraph if it doesn't exist already\n save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)\n\n # Evaluate on LFW\n if args.lfw_dir:\n evaluate(\n sess,\n lfw_paths,\n embeddings,\n labels_batch,\n image_paths_placeholder,\n labels_placeholder,\n batch_size_placeholder,\n learning_rate_placeholder,\n phase_train_placeholder,\n enqueue_op,\n actual_issame,\n args.batch_size,\n args.lfw_nrof_folds,\n log_dir,\n step,\n summary_writer,\n args.embedding_size,\n )\n\n return model_dir\n\n\ndef train(\n args,\n sess,\n dataset,\n epoch,\n image_paths_placeholder,\n labels_placeholder,\n labels_batch,\n batch_size_placeholder,\n learning_rate_placeholder,\n phase_train_placeholder,\n enqueue_op,\n input_queue,\n global_step,\n embeddings,\n loss,\n train_op,\n summary_op,\n summary_writer,\n learning_rate_schedule_file,\n embedding_size,\n anchor,\n positive,\n negative,\n triplet_loss,\n):\n batch_number = 0\n\n if args.learning_rate > 0.0:\n lr = args.learning_rate\n else:\n lr = fc.get_learning_rate_from_file(learning_rate_schedule_file, epoch)\n while batch_number < args.epoch_size:\n # Sample people randomly from the dataset\n image_paths, num_per_class = sample_people(dataset, args.people_per_batch, args.images_per_person)\n\n print(\"Running forward pass on sampled images: \", end=\"\")\n start_time = time.time()\n nrof_examples = args.people_per_batch * args.images_per_person\n labels_array = np.reshape(np.arange(nrof_examples), (-1, 3))\n image_paths_array = np.reshape(np.expand_dims(np.array(image_paths), 1), (-1, 3))\n sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})\n emb_array = np.zeros((nrof_examples, embedding_size))\n nrof_batches = int(np.ceil(nrof_examples / args.batch_size))\n for i in range(nrof_batches):\n batch_size = min(nrof_examples - i * args.batch_size, args.batch_size)\n emb, lab = sess.run(\n [embeddings, labels_batch],\n feed_dict={\n batch_size_placeholder: batch_size,\n learning_rate_placeholder: lr,\n phase_train_placeholder: True,\n },\n )\n emb_array[lab, :] = emb\n print(\"%.3f\" % (time.time() - start_time))\n\n # Select triplets based on the embeddings\n print(\"Selecting suitable triplets for training\")\n triplets, nrof_random_negs, nrof_triplets = select_triplets(\n emb_array, num_per_class, image_paths, args.people_per_batch, args.alpha\n )\n selection_time = time.time() - start_time\n print(\n \"(nrof_random_negs, nrof_triplets) = (%d, %d): time=%.3f seconds\"\n % (nrof_random_negs, nrof_triplets, selection_time)\n )\n\n # Perform training on the selected 
triplets\n nrof_batches = int(np.ceil(nrof_triplets * 3 / args.batch_size))\n triplet_paths = list(itertools.chain(*triplets))\n labels_array = np.reshape(np.arange(len(triplet_paths)), (-1, 3))\n triplet_paths_array = np.reshape(np.expand_dims(np.array(triplet_paths), 1), (-1, 3))\n sess.run(enqueue_op, {image_paths_placeholder: triplet_paths_array, labels_placeholder: labels_array})\n nrof_examples = len(triplet_paths)\n train_time = 0\n i = 0\n emb_array = np.zeros((nrof_examples, embedding_size))\n loss_array = np.zeros((nrof_triplets,))\n summary = tf.Summary()\n step = 0\n while i < nrof_batches:\n start_time = time.time()\n batch_size = min(nrof_examples - i * args.batch_size, args.batch_size)\n feed_dict = {\n batch_size_placeholder: batch_size,\n learning_rate_placeholder: lr,\n phase_train_placeholder: True,\n }\n err, _, step, emb, lab = sess.run(\n [loss, train_op, global_step, embeddings, labels_batch], feed_dict=feed_dict\n )\n emb_array[lab, :] = emb\n loss_array[i] = err\n duration = time.time() - start_time\n print(\n \"Epoch: [%d][%d/%d]\\tTime %.3f\\tLoss %2.3f\" % (epoch, batch_number + 1, args.epoch_size, duration, err)\n )\n batch_number += 1\n i += 1\n train_time += duration\n summary.value.add(tag=\"loss\", simple_value=err)\n\n # Add validation loss and accuracy to summary\n # pylint: disable=maybe-no-member\n summary.value.add(tag=\"time/selection\", simple_value=selection_time)\n summary_writer.add_summary(summary, step)\n return step\n\n\ndef select_triplets(embeddings, nrof_images_per_class, image_paths, people_per_batch, alpha):\n \"\"\"Select the triplets for training\"\"\"\n trip_idx = 0\n emb_start_idx = 0\n num_trips = 0\n triplets = []\n\n # VGG Face: Choosing good triplets is crucial and should strike a balance between\n # selecting informative (i.e. challenging) examples and swamping training with examples that\n # are too hard. This is achieve by extending each pair (a, p) to a triplet (a, p, n) by sampling\n # the image n at random, but only between the ones that violate the triplet loss margin. 
The\n # latter is a form of hard-negative mining, but it is not as aggressive (and much cheaper) than\n # choosing the maximally violating example, as often done in structured output learning.\n\n for i in xrange(people_per_batch):\n nrof_images = int(nrof_images_per_class[i])\n for j in xrange(1, nrof_images):\n a_idx = emb_start_idx + j - 1\n neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)\n for pair in xrange(j, nrof_images): # For every possible positive pair.\n p_idx = emb_start_idx + pair\n pos_dist_sqr = np.sum(np.square(embeddings[a_idx] - embeddings[p_idx]))\n neg_dists_sqr[emb_start_idx : emb_start_idx + nrof_images] = np.NaN\n # all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<alpha, pos_dist_sqr<neg_dists_sqr))[0] # FaceNet selection\n all_neg = np.where(neg_dists_sqr - pos_dist_sqr < alpha)[0] # VGG Face selecction\n nrof_random_negs = all_neg.shape[0]\n if nrof_random_negs > 0:\n rnd_idx = np.random.randint(nrof_random_negs)\n n_idx = all_neg[rnd_idx]\n triplets.append((image_paths[a_idx], image_paths[p_idx], image_paths[n_idx]))\n # print('Triplet %d: (%d, %d, %d), pos_dist=%2.6f, neg_dist=%2.6f (%d, %d, %d, %d, %d)' %\n # (trip_idx, a_idx, p_idx, n_idx, pos_dist_sqr, neg_dists_sqr[n_idx], nrof_random_negs, rnd_idx, i, j, emb_start_idx))\n trip_idx += 1\n\n num_trips += 1\n\n emb_start_idx += nrof_images\n\n np.random.shuffle(triplets)\n return triplets, num_trips, len(triplets)\n\n\ndef sample_people(dataset, people_per_batch, images_per_person):\n nrof_images = people_per_batch * images_per_person\n\n # Sample classes from the dataset\n nrof_classes = len(dataset)\n class_indices = np.arange(nrof_classes)\n np.random.shuffle(class_indices)\n\n i = 0\n image_paths = []\n num_per_class = []\n sampled_class_indices = []\n # Sample images from these classes until we have enough\n while len(image_paths) < nrof_images:\n class_index = class_indices[i]\n nrof_images_in_class = len(dataset[class_index])\n image_indices = np.arange(nrof_images_in_class)\n np.random.shuffle(image_indices)\n nrof_images_from_class = min(nrof_images_in_class, images_per_person, nrof_images - len(image_paths))\n idx = image_indices[0:nrof_images_from_class]\n image_paths_for_class = [dataset[class_index].image_paths[j] for j in idx]\n sampled_class_indices += [class_index] * nrof_images_from_class\n image_paths += image_paths_for_class\n num_per_class.append(nrof_images_from_class)\n i += 1\n\n return image_paths, num_per_class\n\n\ndef evaluate(\n sess,\n image_paths,\n embeddings,\n labels_batch,\n image_paths_placeholder,\n labels_placeholder,\n batch_size_placeholder,\n learning_rate_placeholder,\n phase_train_placeholder,\n enqueue_op,\n actual_issame,\n batch_size,\n nrof_folds,\n log_dir,\n step,\n summary_writer,\n embedding_size,\n):\n start_time = time.time()\n # Run forward pass to calculate embeddings\n print(\"Running forward pass on LFW images: \", end=\"\")\n\n nrof_images = len(actual_issame) * 2\n assert len(image_paths) == nrof_images\n labels_array = np.reshape(np.arange(nrof_images), (-1, 3))\n image_paths_array = np.reshape(np.expand_dims(np.array(image_paths), 1), (-1, 3))\n sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})\n emb_array = np.zeros((nrof_images, embedding_size))\n nrof_batches = int(np.ceil(nrof_images / batch_size))\n label_check_array = np.zeros((nrof_images,))\n for i in xrange(nrof_batches):\n batch_size = min(nrof_images - i * batch_size, batch_size)\n emb, lab = sess.run(\n 
[embeddings, labels_batch],\n feed_dict={\n batch_size_placeholder: batch_size,\n learning_rate_placeholder: 0.0,\n phase_train_placeholder: False,\n },\n )\n emb_array[lab, :] = emb\n label_check_array[lab] = 1\n print(\"%.3f\" % (time.time() - start_time))\n\n assert np.all(label_check_array == 1)\n\n _, _, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame, nrof_folds=nrof_folds)\n\n print(\"Accuracy: %1.3f+-%1.3f\" % (np.mean(accuracy), np.std(accuracy)))\n print(\"Validation rate: %2.5f+-%2.5f @ FAR=%2.5f\" % (val, val_std, far))\n lfw_time = time.time() - start_time\n # Add validation loss and accuracy to summary\n summary = tf.Summary()\n # pylint: disable=maybe-no-member\n summary.value.add(tag=\"lfw/accuracy\", simple_value=np.mean(accuracy))\n summary.value.add(tag=\"lfw/val_rate\", simple_value=val)\n summary.value.add(tag=\"time/lfw\", simple_value=lfw_time)\n summary_writer.add_summary(summary, step)\n with open(os.path.join(log_dir, \"lfw_result.txt\"), \"at\") as f:\n f.write(\"%d\\t%.5f\\t%.5f\\n\" % (step, np.mean(accuracy), val))\n\n\ndef save_variables_and_metagraph(sess, saver, summary_writer, model_dir, model_name, step):\n # Save the model checkpoint\n print(\"Saving variables\")\n start_time = time.time()\n checkpoint_path = os.path.join(model_dir, \"model-%s.ckpt\" % model_name)\n saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)\n save_time_variables = time.time() - start_time\n print(\"Variables saved in %.2f seconds\" % save_time_variables)\n metagraph_filename = os.path.join(model_dir, \"model-%s.meta\" % model_name)\n save_time_metagraph = 0\n if not os.path.exists(metagraph_filename):\n print(\"Saving metagraph\")\n start_time = time.time()\n saver.export_meta_graph(metagraph_filename)\n save_time_metagraph = time.time() - start_time\n print(\"Metagraph saved in %.2f seconds\" % save_time_metagraph)\n summary = tf.Summary()\n # pylint: disable=maybe-no-member\n summary.value.add(tag=\"time/save_variables\", simple_value=save_time_variables)\n summary.value.add(tag=\"time/save_metagraph\", simple_value=save_time_metagraph)\n summary_writer.add_summary(summary, step)\n\n\ndef get_learning_rate_from_file(filename, epoch):\n with open(filename, \"r\") as f:\n for line in f.readlines():\n line = line.split(\"#\", 1)[0]\n if line:\n par = line.strip().split(\":\")\n e = int(par[0])\n lr = float(par[1])\n if e <= epoch:\n learning_rate = lr\n else:\n return learning_rate\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--logs_base_dir\", type=str, help=\"Directory where to write event logs.\", default=\"~/logs/facenet\"\n )\n parser.add_argument(\n \"--models_base_dir\",\n type=str,\n help=\"Directory where to write trained models and checkpoints.\",\n default=\"~/models/facenet\",\n )\n parser.add_argument(\n \"--gpu_memory_fraction\",\n type=float,\n help=\"Upper bound on the amount of GPU memory that will be used by the process.\",\n default=1.0,\n )\n parser.add_argument(\"--pretrained_model\", type=str, help=\"Load a pretrained model before training starts.\")\n parser.add_argument(\n \"--data_dir\",\n type=str,\n help=\"Path to the data directory containing aligned face patches.\",\n default=\"~/datasets/casia/casia_maxpy_mtcnnalign_182_160\",\n )\n parser.add_argument(\n \"--model_def\",\n type=str,\n help=\"Model definition. 
Points to a module containing the definition of the inference graph.\",\n default=\"models.inception_resnet_v1\",\n )\n parser.add_argument(\"--max_nrof_epochs\", type=int, help=\"Number of epochs to run.\", default=500)\n parser.add_argument(\"--batch_size\", type=int, help=\"Number of images to process in a batch.\", default=90)\n parser.add_argument(\"--image_size\", type=int, help=\"Image size (height, width) in pixels.\", default=160)\n parser.add_argument(\"--people_per_batch\", type=int, help=\"Number of people per batch.\", default=45)\n parser.add_argument(\"--images_per_person\", type=int, help=\"Number of images per person.\", default=40)\n parser.add_argument(\"--epoch_size\", type=int, help=\"Number of batches per epoch.\", default=1000)\n parser.add_argument(\"--alpha\", type=float, help=\"Positive to negative triplet distance margin.\", default=0.2)\n parser.add_argument(\"--embedding_size\", type=int, help=\"Dimensionality of the embedding.\", default=128)\n parser.add_argument(\n \"--random_crop\",\n help=\"Performs random cropping of training images. If false, the center image_size pixels from the training images are used. \"\n + \"If the size of the images in the data directory is equal to image_size no cropping is performed\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--random_flip\", help=\"Performs random horizontal flipping of training images.\", action=\"store_true\"\n )\n parser.add_argument(\n \"--keep_probability\",\n type=float,\n help=\"Keep probability of dropout for the fully connected layer(s).\",\n default=1.0,\n )\n parser.add_argument(\"--weight_decay\", type=float, help=\"L2 weight regularization.\", default=0.0)\n parser.add_argument(\n \"--optimizer\",\n type=str,\n choices=[\"ADAGRAD\", \"ADADELTA\", \"ADAM\", \"RMSPROP\", \"MOM\"],\n help=\"The optimization algorithm to use\",\n default=\"ADAGRAD\",\n )\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n help=\"Initial learning rate. If set to a negative value a learning rate \"\n + 'schedule can be specified in the file \"learning_rate_schedule.txt\"',\n default=0.1,\n )\n parser.add_argument(\n \"--learning_rate_decay_epochs\", type=int, help=\"Number of epochs between learning rate decay.\", default=100\n )\n parser.add_argument(\"--learning_rate_decay_factor\", type=float, help=\"Learning rate decay factor.\", default=1.0)\n parser.add_argument(\n \"--moving_average_decay\",\n type=float,\n help=\"Exponential decay for tracking of training parameters.\",\n default=0.9999,\n )\n parser.add_argument(\"--seed\", type=int, help=\"Random seed.\", default=666)\n parser.add_argument(\n \"--learning_rate_schedule_file\",\n type=str,\n help=\"File containing the learning rate schedule that is used when learning_rate is set to to -1.\",\n default=\"data/learning_rate_schedule.txt\",\n )\n\n # Parameters for validation on LFW\n parser.add_argument(\n \"--lfw_pairs\", type=str, help=\"The file containing the pairs to use for validation.\", default=\"data/pairs.txt\"\n )\n parser.add_argument(\n \"--lfw_dir\", type=str, help=\"Path to the data directory containing aligned face patches.\", default=\"\"\n )\n parser.add_argument(\n \"--lfw_nrof_folds\",\n type=int,\n help=\"Number of folds to use for cross validation. Mainly used for testing.\",\n default=10,\n )\n return parser.parse_args(argv)\n\n\nif __name__ == \"__main__\":\n main(parse_arguments(sys.argv[1:]))\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.reshape",
"tensorflow.unstack",
"numpy.random.seed",
"tensorflow.Variable",
"tensorflow.identity",
"tensorflow.summary.FileWriter",
"tensorflow.global_variables_initializer",
"tensorflow.image.resize_image_with_crop_or_pad",
"tensorflow.python.ops.data_flow_ops.FIFOQueue",
"tensorflow.Graph",
"tensorflow.train.exponential_decay",
"tensorflow.train.batch_join",
"numpy.where",
"tensorflow.image.per_image_standardization",
"numpy.mean",
"tensorflow.add_n",
"numpy.zeros",
"numpy.ceil",
"tensorflow.random_crop",
"tensorflow.image.random_flip_left_right",
"tensorflow.get_collection",
"tensorflow.read_file",
"tensorflow.global_variables",
"numpy.arange",
"tensorflow.train.start_queue_runners",
"numpy.all",
"tensorflow.set_random_seed",
"numpy.std",
"numpy.square",
"tensorflow.local_variables_initializer",
"tensorflow.nn.l2_normalize",
"tensorflow.train.Coordinator",
"tensorflow.image.decode_image",
"tensorflow.ConfigProto",
"tensorflow.placeholder",
"numpy.random.shuffle",
"tensorflow.summary.merge_all",
"tensorflow.trainable_variables",
"numpy.array",
"numpy.random.randint",
"tensorflow.GPUOptions",
"tensorflow.Summary"
]
] |
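Note on the record above: train_tripletloss.py optimizes fc.triplet_loss over L2-normalized (anchor, positive, negative) embedding triples. Below is a minimal NumPy sketch of the standard FaceNet triplet loss it is built around; the function name, shapes, and margin default are illustrative assumptions, not the facenet API.

import numpy as np

def triplet_loss(anchor, positive, negative, alpha=0.2):
    # FaceNet-style triplet loss: keep the anchor-positive squared
    # distance at least `alpha` below the anchor-negative one.
    pos_dist = np.sum(np.square(anchor - positive), axis=1)  # ||a - p||^2
    neg_dist = np.sum(np.square(anchor - negative), axis=1)  # ||a - n||^2
    return np.mean(np.maximum(pos_dist - neg_dist + alpha, 0.0))

# Usage with random unit-norm embeddings of the record's default size (128).
rng = np.random.default_rng(0)
emb = rng.normal(size=(3, 9, 128))
emb /= np.linalg.norm(emb, axis=-1, keepdims=True)
anchor, positive, negative = emb
print(triplet_loss(anchor, positive, negative))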
preylol/pbrt-v3 | [
"29661bf4caf9e2df2bf21f2a28ac8e53b2439f9f"
] | [
"evaluation/scripts/python-scripts/exr2png.py"
] | [
"import os\nimport sys\nimport pyexr\nimport numpy as np\nfrom PIL import Image\nimport re\n \ndef exec():\n filepaths = []\n savepaths = []\n images = []\n maxvalues = []\n # Prep variable\n filelist = os.listdir(\"output\")\n for file in filelist:\n if file.endswith(\".exr\"):\n filepath = os.path.join(\"output\", file)\n savepath = sys.argv[0][:-len(\"exr2png.py\")] + \"../../plots/renders/\"\n image = pyexr.open(filepath).get()\n images.append(image)\n maxvalues.append(np.max(image))\n filepaths.append(filepath)\n scenename = re.match(r\".*(crown|measure-one|villa|killeroo|hair|ecosys|landscape).*\", file)[1]\n savepaths.append(savepath + scenename + \".png\")\n for i in range(len(images)):\n #images[i] *= 16 / maxvalues[i]\n images[i] = np.where(images[i]<=0.0031308,12.92 * images[i], 1.055*(images[i]**(1/2.4)) - 0.055)\n images[i] = np.clip(images[i], 0, 1)\n images[i] = (images[i] * 255).astype(np.uint8)\n Image.fromarray(images[i]).save(savepaths[i])\n \nexec()\n"
] | [
[
"numpy.where",
"numpy.clip",
"numpy.max"
]
] |
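The exr2png.py record above tone-maps linear EXR radiance to PNG with the piecewise sRGB transfer function. A self-contained sketch of that conversion, using the same constants as the record (the helper name is an assumption):

import numpy as np

def linear_to_srgb(x):
    # Piecewise sRGB encoding: linear segment below 0.0031308,
    # 1/2.4-power segment above, matching the expression in the record.
    x = np.clip(x, 0.0, None)
    return np.where(x <= 0.0031308,
                    12.92 * x,
                    1.055 * np.power(x, 1.0 / 2.4) - 0.055)

# Usage: map linear radiance in [0, 1] to 8-bit pixel values.
linear = np.linspace(0.0, 1.0, 5)
print((np.clip(linear_to_srgb(linear), 0.0, 1.0) * 255).astype(np.uint8))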
DVM000/keras-yolo3 | [
"ef0baf2ce19b8b637e5535d04ead48020caa06c5"
] | [
"train.py"
] | [
"#! /usr/bin/env python\n\nimport argparse\nimport os\nimport numpy as np\nimport json\nfrom voc import parse_voc_annotation\nfrom yolo import create_yolov3_model, dummy_loss\nfrom generator import BatchGenerator\nfrom utils.utils import normalize, evaluate, makedirs\nfrom tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau\nfrom tensorflow.keras.optimizers import Adam\nfrom callbacks import CustomModelCheckpoint, CustomTensorBoard\nfrom utils.multi_gpu_model import multi_gpu_model\nimport tensorflow as tf\nimport tensorflow.keras\nfrom tensorflow.keras.models import load_model\n\n\nconfig = tf.compat.v1.ConfigProto(\n gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.9)\n # device_count = {'GPU': 1}\n)\nconfig.gpu_options.allow_growth = True\nsession = tf.compat.v1.Session(config=config)\ntf.compat.v1.keras.backend.set_session(session)\n\n\n'''def prevent_GPU_overflow( ):\n gpu_devices = tf.config.experimental.list_physical_devices('GPU')\n if len(gpu_devices):\n tf.config.experimental.set_memory_growth(gpu_devices[0], True)\n\nprevent_GPU_overflow( )\n\ndef divide_GPU( LIMIT=4096 ):\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n # Restrict TensorFlow to only allocate 1GB of memory on the first GPU\n try:\n tf.config.experimental.set_virtual_device_configuration(\n gpus[0],\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=LIMIT),\n tf.config.experimental.VirtualDeviceConfiguration(memory_limit=LIMIT)])\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Virtual devices must be set before GPUs have been initialized\n print(e)\n\ndivide_GPU( LIMIT=8192 )'''\n\n\ndef create_training_instances(\n train_annot_folder,\n train_image_folder,\n train_cache,\n valid_annot_folder,\n valid_image_folder,\n valid_cache,\n labels,\n):\n # parse annotations of the training set\n train_ints, train_labels = parse_voc_annotation(train_annot_folder, train_image_folder, train_cache, labels)\n\n # parse annotations of the validation set, if any, otherwise split the training set\n if os.path.exists(valid_annot_folder):\n valid_ints, valid_labels = parse_voc_annotation(valid_annot_folder, valid_image_folder, valid_cache, labels)\n else:\n print(\"valid_annot_folder not exists. Spliting the trainining set.\")\n\n train_valid_split = int(0.8*len(train_ints))\n np.random.seed(0)\n np.random.shuffle(train_ints)\n np.random.seed()\n\n valid_ints = train_ints[train_valid_split:]\n train_ints = train_ints[:train_valid_split]\n\n # compare the seen labels with the given labels in config.json\n if len(labels) > 0:\n overlap_labels = set(labels).intersection(set(train_labels.keys()))\n\n print('Seen labels: \\t' + str(train_labels) + '\\n')\n print('Given labels: \\t' + str(labels))\n\n # return None, None, None if some given label is not in the dataset\n if len(overlap_labels) < len(labels):\n print('Some labels have no annotations! Please revise the list of labels in the config.json.')\n return None, None, None\n else:\n print('No labels are provided. 
Train on all seen labels.')\n print(train_labels)\n labels = train_labels.keys()\n\n max_box_per_image = max([len(inst['object']) for inst in (train_ints + valid_ints)])\n\n return train_ints, valid_ints, sorted(labels), max_box_per_image\n\ndef create_callbacks(saved_weights_name, tensorboard_logs, model_to_save):\n makedirs(tensorboard_logs)\n \n early_stop = EarlyStopping(\n monitor = 'loss', \n min_delta = 0.01, \n patience = 7, \n mode = 'min', \n verbose = 1\n )\n checkpoint = CustomModelCheckpoint(\n model_to_save = model_to_save,\n filepath = saved_weights_name,# + '{epoch:02d}.h5', \n monitor = 'loss', \n verbose = 1, \n save_best_only = True, #False \n mode = 'min', \n period = 1\n )\n reduce_on_plateau = ReduceLROnPlateau(\n monitor = 'loss',\n factor = 0.1,\n patience = 2,\n verbose = 1,\n mode = 'min',\n epsilon = 0.01,\n cooldown = 0,\n min_lr = 0\n )\n tensorboard = CustomTensorBoard(\n log_dir = tensorboard_logs,\n write_graph = True,\n write_images = True,\n ) \n stop_on_nan = tf.keras.callbacks.TerminateOnNaN()\n return [early_stop, checkpoint, reduce_on_plateau, tensorboard, stop_on_nan]\n\ndef create_model(\n nb_class, \n anchors, \n max_box_per_image, \n max_grid, batch_size, \n warmup_batches, \n ignore_thresh, \n multi_gpu, \n saved_weights_name, \n lr,\n grid_scales,\n obj_scale,\n noobj_scale,\n xywh_scale,\n class_scale \n):\n if multi_gpu > 1:\n with tf.device('/cpu:0'):\n template_model, infer_model = create_yolov3_model(\n nb_class = nb_class, \n anchors = anchors, \n max_box_per_image = max_box_per_image, \n max_grid = max_grid, \n batch_size = batch_size//multi_gpu, \n warmup_batches = warmup_batches,\n ignore_thresh = ignore_thresh,\n grid_scales = grid_scales,\n obj_scale = obj_scale,\n noobj_scale = noobj_scale,\n xywh_scale = xywh_scale,\n class_scale = class_scale\n )\n else:\n template_model, infer_model = create_yolov3_model(\n nb_class = nb_class, \n anchors = anchors, \n max_box_per_image = max_box_per_image, \n max_grid = max_grid, \n batch_size = batch_size, \n warmup_batches = warmup_batches,\n ignore_thresh = ignore_thresh,\n grid_scales = grid_scales,\n obj_scale = obj_scale,\n noobj_scale = noobj_scale,\n xywh_scale = xywh_scale,\n class_scale = class_scale\n ) \n\n # load the pretrained weight if exists, otherwise load the backend weight only\n if os.path.exists(saved_weights_name): \n print(\"\\nLoading pretrained weights.\\n\")\n template_model.load_weights(saved_weights_name)\n else:\n template_model.load_weights(\"backend.h5\", by_name=True) \n\n if multi_gpu > 1:\n train_model = multi_gpu_model(template_model, gpus=multi_gpu)\n else:\n train_model = template_model \n\n #optimizer = Adam(lr=lr, clipnorm=0.001)\n optimizer = tensorflow.keras.optimizers.RMSprop(lr=lr)\n train_model.compile(loss=dummy_loss, optimizer=optimizer) \n\n return train_model, infer_model\n\ndef _main_(args):\n config_path = args.conf\n\n with open(config_path) as config_buffer: \n config = json.loads(config_buffer.read())\n\n ###############################\n # Parse the annotations \n ###############################\n train_ints, valid_ints, labels, max_box_per_image = create_training_instances(\n config['train']['train_annot_folder'],\n config['train']['train_image_folder'],\n config['train']['cache_name'],\n config['valid']['valid_annot_folder'],\n config['valid']['valid_image_folder'],\n config['valid']['cache_name'],\n config['model']['labels']\n )\n print('\\nTraining on: \\t' + str(labels) + '\\n')\n\n ###############################\n # Create the generators 
\n ############################### \n train_generator = BatchGenerator(\n instances = train_ints, \n anchors = config['model']['anchors'], \n labels = labels, \n downsample = 32, # ratio between network input's size and network output's size, 32 for YOLOv3\n max_box_per_image = max_box_per_image,\n batch_size = config['train']['batch_size'],\n min_net_size = config['model']['min_input_size'],\n max_net_size = config['model']['max_input_size'], \n shuffle = True, \n jitter = 0.3, \n norm = normalize\n )\n \n valid_generator = BatchGenerator(\n instances = valid_ints, \n anchors = config['model']['anchors'], \n labels = labels, \n downsample = 32, # ratio between network input's size and network output's size, 32 for YOLOv3\n max_box_per_image = max_box_per_image,\n batch_size = config['train']['batch_size'],\n min_net_size = config['model']['min_input_size'],\n max_net_size = config['model']['max_input_size'], \n shuffle = True, \n jitter = 0.0, \n norm = normalize\n )\n\n ###############################\n # Create the model \n ###############################\n if os.path.exists(config['train']['saved_weights_name']): \n config['train']['warmup_epochs'] = 0\n warmup_batches = config['train']['warmup_epochs'] * (config['train']['train_times']*len(train_generator)) \n\n os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']\n multi_gpu = len(config['train']['gpus'].split(','))\n\n train_model, infer_model = create_model(\n nb_class = len(labels), \n anchors = config['model']['anchors'], \n max_box_per_image = max_box_per_image, \n max_grid = [config['model']['max_input_size'], config['model']['max_input_size']], \n batch_size = config['train']['batch_size'], \n warmup_batches = warmup_batches,\n ignore_thresh = config['train']['ignore_thresh'],\n multi_gpu = multi_gpu,\n saved_weights_name = config['train']['saved_weights_name'],\n lr = config['train']['learning_rate'],\n grid_scales = config['train']['grid_scales'],\n obj_scale = config['train']['obj_scale'],\n noobj_scale = config['train']['noobj_scale'],\n xywh_scale = config['train']['xywh_scale'],\n class_scale = config['train']['class_scale'],\n )\n\n ###############################\n # Kick off the training\n ###############################\n callbacks = create_callbacks(config['train']['saved_weights_name'], config['train']['tensorboard_dir'], infer_model)\n\n ### DELIA\n #train_model.summary()\n #tf.keras.backend.set_learning_phase(1) \n #tf.compat.v1.disable_eager_execution()\n tf.compat.v1.keras.backend.get_session().run(tf.compat.v1.global_variables_initializer())\n #######\n\n train_model.fit_generator(\n generator = train_generator, \n steps_per_epoch = len(train_generator) * config['train']['train_times'], \n epochs = config['train']['nb_epochs'] + config['train']['warmup_epochs'], \n verbose = 2 if config['train']['debug'] else 1,\n callbacks = callbacks, \n #workers = 4,\n #max_queue_size = 8\n )\n\n # make a GPU version of infer_model for evaluation\n if multi_gpu > 1:\n infer_model = load_model(config['train']['saved_weights_name'])\n\n ###############################\n # Run the evaluation\n ############################### \n # compute mAP for all the classes\n average_precisions = evaluate(infer_model, valid_generator)\n\n # print the score\n for label, average_precision in average_precisions.items():\n print(labels[label] + ': {:.4f}'.format(average_precision))\n print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions))) \n\nif __name__ == '__main__':\n argparser = 
argparse.ArgumentParser(description='train and evaluate YOLO_v3 model on any dataset')\n argparser.add_argument('-c', '--conf', help='path to configuration file') \n\n args = argparser.parse_args()\n _main_(args)\n"
] | [
[
"tensorflow.keras.callbacks.TerminateOnNaN",
"tensorflow.compat.v1.Session",
"numpy.random.shuffle",
"tensorflow.compat.v1.GPUOptions",
"tensorflow.keras.models.load_model",
"tensorflow.compat.v1.keras.backend.get_session",
"tensorflow.device",
"numpy.random.seed",
"tensorflow.compat.v1.keras.backend.set_session",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.keras.callbacks.ReduceLROnPlateau"
]
] |
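When valid_annot_folder is missing, the train.py record above falls back to a seeded 80/20 shuffle-split of the training annotations. A sketch of that fallback as a standalone helper (the function name is an assumption):

import numpy as np

def split_instances(instances, train_fraction=0.8, seed=0):
    # Deterministic shuffle so train/valid membership is reproducible,
    # then re-seed from OS entropy, as the record does around its split.
    instances = list(instances)
    split = int(train_fraction * len(instances))
    np.random.seed(seed)
    np.random.shuffle(instances)
    np.random.seed()
    return instances[:split], instances[split:]

train_ints, valid_ints = split_instances(range(10))
print(len(train_ints), len(valid_ints))  # 8 2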
KiryanovKD/models | [
"e17080247e3c9b3301680f61b8f4815c22509e7e"
] | [
"official/nlp/modeling/networks/albert_encoder.py"
] | [
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"ALBERT (https://arxiv.org/abs/1810.04805) text encoder network.\"\"\"\n# pylint: disable=g-classes-have-attributes\nimport collections\nimport tensorflow as tf\n\nfrom official.modeling import activations\nfrom official.nlp.modeling import layers\n\n\[email protected]_keras_serializable(package='Text')\nclass AlbertEncoder(tf.keras.Model):\n \"\"\"ALBERT (https://arxiv.org/abs/1810.04805) text encoder network.\n\n This network implements the encoder described in the paper \"ALBERT: A Lite\n BERT for Self-supervised Learning of Language Representations\"\n (https://arxiv.org/abs/1909.11942).\n\n Compared with BERT (https://arxiv.org/abs/1810.04805), ALBERT refactorizes\n embedding parameters into two smaller matrices and shares parameters\n across layers.\n\n The default values for this object are taken from the ALBERT-Base\n implementation described in the paper.\n\n *Note* that the network is constructed by Keras Functional API.\n\n Args:\n vocab_size: The size of the token vocabulary.\n embedding_width: The width of the word embeddings. If the embedding width is\n not equal to hidden size, embedding parameters will be factorized into two\n matrices in the shape of `(vocab_size, embedding_width)` and\n `(embedding_width, hidden_size)`, where `embedding_width` is usually much\n smaller than `hidden_size`.\n hidden_size: The size of the transformer hidden layers.\n num_layers: The number of transformer layers.\n num_attention_heads: The number of attention heads for each transformer. The\n hidden size must be divisible by the number of attention heads.\n max_sequence_length: The maximum sequence length that this encoder can\n consume. 
If None, max_sequence_length uses the value from sequence length.\n This determines the variable shape for positional embeddings.\n type_vocab_size: The number of types that the 'type_ids' input can take.\n intermediate_size: The intermediate size for the transformer layers.\n activation: The activation to use for the transformer layers.\n dropout_rate: The dropout rate to use for the transformer layers.\n attention_dropout_rate: The dropout rate to use for the attention layers\n within the transformer layers.\n initializer: The initialzer to use for all weights in this encoder.\n dict_outputs: Whether to use a dictionary as the model outputs.\n \"\"\"\n\n def __init__(self,\n vocab_size,\n embedding_width=128,\n hidden_size=768,\n num_layers=12,\n num_attention_heads=12,\n max_sequence_length=512,\n type_vocab_size=16,\n intermediate_size=3072,\n activation=activations.gelu,\n dropout_rate=0.1,\n attention_dropout_rate=0.1,\n initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),\n dict_outputs=False,\n **kwargs):\n activation = tf.keras.activations.get(activation)\n initializer = tf.keras.initializers.get(initializer)\n\n word_ids = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_word_ids')\n mask = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_mask')\n type_ids = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_type_ids')\n\n if embedding_width is None:\n embedding_width = hidden_size\n embedding_layer = layers.OnDeviceEmbedding(\n vocab_size=vocab_size,\n embedding_width=embedding_width,\n initializer=initializer,\n name='word_embeddings')\n word_embeddings = embedding_layer(word_ids)\n\n # Always uses dynamic slicing for simplicity.\n position_embedding_layer = layers.PositionEmbedding(\n initializer=initializer,\n max_length=max_sequence_length,\n name='position_embedding')\n position_embeddings = position_embedding_layer(word_embeddings)\n\n type_embeddings = (\n layers.OnDeviceEmbedding(\n vocab_size=type_vocab_size,\n embedding_width=embedding_width,\n initializer=initializer,\n use_one_hot=True,\n name='type_embeddings')(type_ids))\n\n embeddings = tf.keras.layers.Add()(\n [word_embeddings, position_embeddings, type_embeddings])\n embeddings = (\n tf.keras.layers.LayerNormalization(\n name='embeddings/layer_norm',\n axis=-1,\n epsilon=1e-12,\n dtype=tf.float32)(embeddings))\n embeddings = (tf.keras.layers.Dropout(rate=dropout_rate)(embeddings))\n # We project the 'embedding' output to 'hidden_size' if it is not already\n # 'hidden_size'.\n if embedding_width != hidden_size:\n embeddings = tf.keras.layers.experimental.EinsumDense(\n '...x,xy->...y',\n output_shape=hidden_size,\n bias_axes='y',\n kernel_initializer=initializer,\n name='embedding_projection')(\n embeddings)\n\n data = embeddings\n attention_mask = layers.SelfAttentionMask()(data, mask)\n shared_layer = layers.TransformerEncoderBlock(\n num_attention_heads=num_attention_heads,\n inner_dim=intermediate_size,\n inner_activation=activation,\n output_dropout=dropout_rate,\n attention_dropout=attention_dropout_rate,\n kernel_initializer=initializer,\n name='transformer')\n encoder_outputs = []\n for _ in range(num_layers):\n data = shared_layer([data, attention_mask])\n encoder_outputs.append(data)\n\n # Applying a tf.slice op (through subscript notation) to a Keras tensor\n # like this will create a SliceOpLambda layer. 
This is better than a Lambda\n # layer with Python code, because that is fundamentally less portable.\n first_token_tensor = data[:, 0, :]\n cls_output = tf.keras.layers.Dense(\n units=hidden_size,\n activation='tanh',\n kernel_initializer=initializer,\n name='pooler_transform')(\n first_token_tensor)\n if dict_outputs:\n outputs = dict(\n sequence_output=data,\n encoder_outputs=encoder_outputs,\n pooled_output=cls_output,\n )\n else:\n outputs = [data, cls_output]\n\n # b/164516224\n # Once we've created the network using the Functional API, we call\n # super().__init__ as though we were invoking the Functional API Model\n # constructor, resulting in this object having all the properties of a model\n # created using the Functional API. Once super().__init__ is called, we\n # can assign attributes to `self` - note that all `self` assignments are\n # below this line.\n super(AlbertEncoder, self).__init__(\n inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs)\n config_dict = {\n 'vocab_size': vocab_size,\n 'embedding_width': embedding_width,\n 'hidden_size': hidden_size,\n 'num_layers': num_layers,\n 'num_attention_heads': num_attention_heads,\n 'max_sequence_length': max_sequence_length,\n 'type_vocab_size': type_vocab_size,\n 'intermediate_size': intermediate_size,\n 'activation': tf.keras.activations.serialize(activation),\n 'dropout_rate': dropout_rate,\n 'attention_dropout_rate': attention_dropout_rate,\n 'initializer': tf.keras.initializers.serialize(initializer),\n }\n\n # We are storing the config dict as a namedtuple here to ensure checkpoint\n # compatibility with an earlier version of this model which did not track\n # the config dict attribute. TF does not track immutable attrs which\n # do not contain Trackables, so by creating a config namedtuple instead of\n # a dict we avoid tracking it.\n config_cls = collections.namedtuple('Config', config_dict.keys())\n self._config = config_cls(**config_dict)\n self._embedding_layer = embedding_layer\n self._position_embedding_layer = position_embedding_layer\n\n def get_embedding_table(self):\n return self._embedding_layer.embeddings\n\n def get_config(self):\n return dict(self._config._asdict())\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n"
] | [
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.keras.activations.serialize",
"tensorflow.keras.initializers.get",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.experimental.EinsumDense",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.initializers.serialize",
"tensorflow.keras.activations.get",
"tensorflow.keras.layers.Input"
]
] |
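The AlbertEncoder record above factorizes the word embedding into a (vocab_size, embedding_width) table plus an (embedding_width, hidden_size) projection whenever the two widths differ. A small calculation showing why this shrinks the parameter count; the figures use ALBERT-Base-like sizes as an illustrative assumption:

def embedding_params(vocab_size, hidden_size, embedding_width=None):
    # Unfactorized: one (vocab, hidden) table. Factorized: a smaller
    # (vocab, width) table plus a (width, hidden) projection matrix.
    if embedding_width is None or embedding_width == hidden_size:
        return vocab_size * hidden_size
    return vocab_size * embedding_width + embedding_width * hidden_size

print(embedding_params(30000, 768))       # 23040000 without factorization
print(embedding_params(30000, 768, 128))  # 3938304 with width 128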
dataiku-research/paper_ial_2021 | [
"f860b6eb2d8471bc23e44d282e50c4deaf0813d9"
] | [
"exp/experimenter.py"
] | [
"import json\nfrom importlib import import_module\nimport pandas as pd\nfrom pandas.errors import EmptyDataError\nimport numpy as np\nimport pickle\nimport shutil\nimport time\nfrom pathlib import Path\nfrom collections import defaultdict\n\n\nclass CsvValue:\n\n def __init__(self, path):\n self.path = path\n try:\n self._data = pd.read_csv(self.path)\n # Make all columns not called value as index\n self._data.set_index(self._data.columns.drop('value').to_list(), inplace=True)\n except (FileNotFoundError, EmptyDataError):\n self._data = None\n\n def upsert(self, index, value):\n if self._data is None:\n self._data = pd.DataFrame([{**index, 'value': value}])\n self._data.set_index(self._data.columns.drop('value').to_list(), inplace=True)\n else:\n # Check that the index match\n diff = set(index.keys()).difference(set(self._data.index.names))\n if len(diff) != 0:\n raise ValueError('Index mismatch between DB and query: {}'.format(diff))\n \n # Now we just need to update the value if already there otherwise add it\n loc = tuple([index[k] for k in self._data.index.names])\n try:\n self._data.at[loc, 'value'] = value\n except KeyError:\n self._data = self._data.append(pd.DataFrame([[value]], columns=['value'], index=[loc]))\n self._data.to_csv(self.path)\n\n\nclass CsvDb:\n def __init__(self, folder):\n self.folder = Path(folder)\n self._values = dict()\n if not self.folder.exists():\n self.folder.mkdir()\n else:\n for f in self.folder.iterdir():\n if f.is_dir():\n continue\n self._values[f.stem] = CsvValue(str(f))\n \n def upsert(self, key, index, value):\n if not key in self._values:\n self._values[key] = CsvValue(str(self.folder / (key + '.csv')))\n self._values[key].upsert(index, value)\n\n\nclass Experiment():\n\n def __init__(self, db, seed, path='./cache', force=False, verbose=0):\n self.db = CsvDb(db)\n self.seed = seed\n self.path = Path(path) / str(seed)\n if not self.path.exists():\n self.path.mkdir(exist_ok=True, parents=True)\n self.verbose = verbose\n self._memory = defaultdict(dict)\n self.force = force\n \n def _log(self, verbosity, message):\n if self.verbose >= verbosity:\n print(message)\n\n def log_value(self, config, key, value):\n self.db.upsert(key, config, value)\n\n def _load(self, iter_id, name, tmp=False):\n if tmp:\n filebase = self.path / str(iter_id) / 'tmp' / name\n else:\n filebase = self.path / str(iter_id) / name \n if filebase.with_suffix('.npy').exists():\n value = np.load(filebase.with_suffix('.npy'))\n elif filebase.with_suffix('.krs').exists():\n from keras.models import load_model\n value = load_model(filebase.with_suffix('.krs'))\n elif filebase.with_suffix('.pkl').exists():\n with open(filebase.with_suffix('.pkl'), 'rb') as filedesc:\n value = pickle.load(filedesc)\n else:\n raise ValueError('Could not load variable {}.{}'.format(iter_id, name))\n self._memory[iter_id][name] = value\n return value\n\n def _save_value_at(self, iter_id, name, value, tmp=False):\n self._memory[iter_id][name] = value\n\n if tmp:\n filebase = self.path / str(iter_id) / 'tmp' / name\n else:\n filebase = self.path / str(iter_id) / name\n if type(value).__module__ == np.__name__:\n np.save(filebase.with_suffix('.npy'), value)\n elif 'keras' in value.__module__.split('.'):\n from keras.models import save_model\n save_model(value, filebase.with_suffix('.krs'))\n else:\n with open(filebase.with_suffix('.pkl'), 'wb') as f:\n pickle.dump(value, f)\n\n def retrieve_value_at(self, iter_id, name, first=None):\n self._log(2, 'Retrieving {} {}'.format(iter_id, name))\n if self.first:\n 
return first\n if iter_id in self._memory and name in self._memory[iter_id]:\n return self._memory[iter_id][name]\n return self._load(iter_id, name)\n\n def persist_value_at(self, iter_id, name, value):\n self._log(2, 'Persisting {} {}'.format(iter_id, name))\n self._memory[iter_id][name] = value\n self._save_value_at(iter_id, name, value)\n\n def resume_value_at(self, iter_id, name, first=None):\n self._log(2, 'Resuming {} {}'.format(iter_id, name))\n if self.first:\n return first\n if iter_id in self._memory and name in self._memory[iter_id]:\n return self._memory[iter_id][name]\n return self._load(iter_id, name, tmp=True)\n\n def cache_value_at(self, iter_id, name, value):\n self._log(2, 'Caching {} {}'.format(iter_id, name))\n\n self._memory[iter_id][name] = value\n self._save_value_at(iter_id, name, value, tmp=True)\n\n def iter(self, items, force_recompute=False):\n\n previous_iter_id = None\n self.first = True\n self._memory = defaultdict(dict)\n\n for current_iter_id in items:\n tmp_path = self.path / str(current_iter_id) / 'tmp'\n tmp_path.mkdir(exist_ok=True, parents=True)\n summary_path = self.path / str(current_iter_id) / 'completed.json'\n\n if summary_path.exists() and not self.force:\n self._log(1, 'Iteration {} already computed.'.format(current_iter_id))\n self.first = False\n continue\n\n t0 = time.time()\n yield current_iter_id\n delta = time.time() - t0\n\n with open(str(summary_path), 'w') as f:\n json.dump(dict(duration=delta), f)\n \n if previous_iter_id is not None:\n del self._memory[previous_iter_id]\n tmp_path = self.path / str(previous_iter_id) / 'tmp'\n if tmp_path.exists():\n shutil.rmtree(str(tmp_path))\n \n previous_iter_id = current_iter_id\n self.first = False\n\n tmp_path = self.path / str(previous_iter_id) / 'tmp'\n if tmp_path.exists():\n shutil.rmtree(str(tmp_path))\n del self._memory\n self._memory = defaultdict(dict)\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
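CsvValue.upsert in the experimenter.py record above updates a value in place when its index tuple already exists and appends a new row otherwise; note it relies on DataFrame.append, which was removed in pandas 2.0. A sketch of the same update-or-insert idea with pd.concat, simplified to a single-column index (the names below are illustrative):

import pandas as pd

def upsert(df, key, value):
    # Update the row in place if the key exists, else append a new row.
    if key in df.index:
        df.loc[key, 'value'] = value
        return df
    return pd.concat([df, pd.DataFrame({'value': [value]}, index=[key])])

db = pd.DataFrame({'value': [0.1]}, index=pd.Index(['seed-0'], name='run'))
db = upsert(db, 'seed-0', 0.2)  # overwrite the existing entry
db = upsert(db, 'seed-1', 0.3)  # insert a new entry
print(db)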
seo-dev/cvml_project | [
"7c95ce22db6f31dc4624af9417edffde021b5351"
] | [
"segmentation/eval.py"
] | [
"import os\nfrom segmentation.cityscape_reader import CityscapesDemoDataset\nimport tensorflow as tf\nimport argparse\nimport numpy as np\nimport cv2\n\nfrom segmentation.labels import cityscapes_mask_colors\nfrom segmentation.model import DeeplabV3\n\nparser = argparse.ArgumentParser(description=\"Cityscapes\")\nparser.add_argument('--project_name', default=\"segmentation_cityscapes\")\nparser.add_argument('--identifier', default=\"deeplabv3_densenet121\")\nparser.add_argument('--data_dir', required=True, help=\"path data root\")\n\n\ndef label2rgb(label, img=None, alpha=0.5, cmap=None):\n label_rgb = cmap[label]\n if img is not None:\n label_rgb = alpha * label_rgb + (1 - alpha) * img\n label_rgb = label_rgb.astype(np.uint8)\n return label_rgb\n\n\[email protected]\ndef predict(model, inputs):\n logits = model(inputs, training=False)\n return logits\n\n\ndef val(model, dataset, save_dir):\n for i, (rgb, inputs, img_path) in enumerate(dataset):\n rgb = tf.squeeze(rgb).numpy()\n\n # Predict\n logits = predict(model, inputs)\n pred = tf.squeeze(tf.argmax(logits, -1)).numpy().astype(np.uint8)\n\n # Save Images\n pred_color = label2rgb(pred, img=rgb, cmap=cityscapes_mask_colors)\n mask_path = os.path.join(save_dir, f'{int(i):04d}.png')\n cv2.imwrite(mask_path, cv2.cvtColor(pred_color, cv2.COLOR_RGB2BGR))\n\n\n\ndef evaluate(args):\n project_dir = os.getcwd()\n output_dir = os.path.join(project_dir, 'results', args.identifier)\n save_dir = os.path.join(output_dir, 'demo')\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n model = DeeplabV3(input_shape=None)\n\n ckpt = tf.train.Checkpoint(step=tf.Variable(1), net=model)\n manager = tf.train.CheckpointManager(ckpt, output_dir, max_to_keep=3)\n ckpt.restore(manager.latest_checkpoint).expect_partial()\n if manager.latest_checkpoint:\n print(\"Restored from {}\".format(manager.latest_checkpoint))\n else:\n print(\"No weights to Restores.\")\n raise\n\n val_dataset = CityscapesDemoDataset(args.data_dir, sequence='stuttgart_02')\n val_dataset = val_dataset.load_tfdataset()\n\n val(model, val_dataset, save_dir)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n evaluate(args)\n"
] | [
[
"tensorflow.squeeze",
"tensorflow.argmax",
"tensorflow.train.CheckpointManager",
"tensorflow.Variable"
]
] |
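The eval.py record above restores weights through tf.train.Checkpoint and tf.train.CheckpointManager, tolerating extra tracked objects with expect_partial(). A minimal save/restore roundtrip of the same mechanism, using a single variable and an illustrative './ckpts' directory:

import tensorflow as tf

step = tf.Variable(1)
ckpt = tf.train.Checkpoint(step=step)
manager = tf.train.CheckpointManager(ckpt, './ckpts', max_to_keep=3)

manager.save()                                             # write a checkpoint
step.assign(42)                                            # change the state
ckpt.restore(manager.latest_checkpoint).expect_partial()   # roll it back
print(int(step))                                           # 1, the saved value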
berquist/eg | [
"4c368b12eaaffcf0af8032f10348cf8bc1c3957a"
] | [
"python/keras/masking.py"
] | [
"from pprint import pprint\n\nimport numpy as np\n\nfrom keras.models import Model\nfrom keras.layers import Activation, Dense, Input, Masking, TimeDistributed\n\n\nif __name__ == \"__main__\":\n\n inp = Input(shape=(3, 6))\n mask = Masking(mask_value=0.1)(inp)\n out = TimeDistributed(Dense(1, activation=\"linear\"))(mask)\n model = Model(inputs=inp, outputs=out)\n\n print(\"Architecture\")\n model.summary()\n\n model.set_weights(\n [\n np.array([[1.0], [1.0], [1.0], [1.0], [1.0], [1.0]], dtype=np.float32),\n np.array([0.0], dtype=np.float32),\n ]\n )\n\n print(\"Weights\")\n pprint(model.get_weights())\n\n data = np.array(\n [[[3, 1, 2, 2, 0.1, 0.1], [0, 0, 0, 0, 0, 0], [2, 1, 1, 2, 0.1, 0.1]]]\n )\n p = model.predict(data)\n print(p)\n\n # Masking only works when all features of a timestep are equal to the mask\n # value.\n #\n # From https://github.com/keras-team/keras/issues/3086#issuecomment-526057828\n data = np.array(\n [[[3, 1, 2, 2, 0.1, 0.1], [0, 0, 0, 0, 0, 0], [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]]\n )\n p = model.predict(data)\n print(p)\n"
] | [
[
"numpy.array"
]
] |
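The masking.py record above demonstrates that Keras' Masking layer skips a timestep only when every feature equals mask_value, so the all-zero timestep is not masked when mask_value=0.1. The equivalent boolean mask, computed directly in NumPy:

import numpy as np

mask_value = 0.1
data = np.array(
    [[[3, 1, 2, 2, 0.1, 0.1], [0, 0, 0, 0, 0, 0], [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]]
)
# A timestep is kept if any feature differs from mask_value; only the
# all-0.1 timestep is dropped, matching the record's second prediction.
keep = np.any(data != mask_value, axis=-1)
print(keep)  # [[ True  True False]]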
DmitryUlyanov/deeppy | [
"79cc7cb552f30bc70eeea9ee7ff4976b0899ea66"
] | [
"examples/siamese_mnist.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"\nSiamese networks\n================\n\n\"\"\"\n\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import offsetbox\nimport deeppy as dp\n\n# Fetch MNIST data\ndataset = dp.dataset.MNIST()\nx_train, y_train, x_test, y_test = dataset.data(flat=True, dp_dtypes=True)\n\n# Normalize pixel intensities\nscaler = dp.StandardScaler()\nx_train = scaler.fit_transform(x_train)\nx_test = scaler.transform(x_test)\n\n# Generate image pairs\nn_pairs = 100000\nx1 = np.empty((n_pairs, 28*28), dtype=dp.float_)\nx2 = np.empty_like(x1, dtype=dp.float_)\ny = np.empty(n_pairs, dtype=dp.int_)\nn_imgs = x_train.shape[0]\nn = 0\nwhile n < n_pairs:\n i = random.randint(0, n_imgs-1)\n j = random.randint(0, n_imgs-1)\n if i == j:\n continue\n x1[n, ...] = x_train[i]\n x2[n, ...] = x_train[j]\n if y_train[i] == y_train[j]:\n y[n] = 1\n else:\n y[n] = 0\n n += 1\n\n# Prepare network inputs\ntrain_input = dp.SupervisedSiameseInput(x1, x2, y, batch_size=128)\n\n# Setup network\nw_gain = 1.5\nw_decay = 1e-4\nnet = dp.SiameseNetwork(\n siamese_layers=[\n dp.FullyConnected(\n n_out=1024,\n weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),\n ),\n dp.ReLU(),\n dp.FullyConnected(\n n_out=1024,\n weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),\n ),\n dp.ReLU(),\n dp.FullyConnected(\n n_out=2,\n weights=dp.Parameter(dp.AutoFiller(w_gain)),\n ),\n ],\n loss=dp.ContrastiveLoss(margin=1.0),\n)\n\n# Train network\ntrainer = dp.StochasticGradientDescent(\n max_epochs=15,\n learn_rule=dp.RMSProp(learn_rate=0.01),\n)\ntrainer.train(net, train_input)\n\n# Plot 2D embedding\ntest_input = dp.Input(x_test)\nx_test = np.reshape(x_test, (-1,) + dataset.img_shape)\nfeat = net.features(test_input)\nfeat -= np.min(feat, 0)\nfeat /= np.max(feat, 0)\n\nplt.figure()\nax = plt.subplot(111)\nshown_images = np.array([[1., 1.]])\nfor i in range(feat.shape[0]):\n dist = np.sum((feat[i] - shown_images)**2, 1)\n if np.min(dist) < 6e-4:\n # don't show points that are too close\n continue\n shown_images = np.r_[shown_images, [feat[i]]]\n imagebox = offsetbox.AnnotationBbox(\n offsetbox.OffsetImage(x_test[i], zoom=0.6, cmap=plt.cm.gray_r),\n xy=feat[i], frameon=False\n )\n ax.add_artist(imagebox)\n\nplt.xticks([]), plt.yticks([])\nplt.title('Embedding from the last layer of the network')\n"
] | [
[
"numpy.sum",
"matplotlib.offsetbox.OffsetImage",
"numpy.empty",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure",
"numpy.reshape",
"numpy.empty_like",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.max",
"numpy.min",
"numpy.array",
"matplotlib.pyplot.yticks"
]
] |
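The siamese_mnist.py record above trains with dp.ContrastiveLoss(margin=1.0) on same/different image pairs. A NumPy sketch of the standard contrastive loss that margin refers to; deeppy's exact formulation may differ, so treat the scaling as an assumption:

import numpy as np

def contrastive_loss(d, y, margin=1.0):
    # Similar pairs (y=1) are penalized by their squared distance;
    # dissimilar pairs (y=0) only when they fall inside the margin.
    return np.mean(y * d**2 + (1 - y) * np.maximum(margin - d, 0.0)**2)

d = np.array([0.1, 0.9, 1.5])  # embedding distances per pair
y = np.array([1, 0, 0])        # 1 = same digit, 0 = different
print(contrastive_loss(d, y))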
lucas-sancere/DRFNS | [
"a35e01d516e9b491c09eaca6701e7e0fe9e56880"
] | [
"src_RealData/Data/CreateTFRecords.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nfrom DataGenRandomT import DataGenRandomT\nfrom DataGenClass import DataGen3, DataGenMulti, DataGen3reduce\nimport numpy as np\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef CreateTFRecord(OUTNAME, PATH, CROP, SIZE,\n TRANSFORM_LIST, UNET, MEAN_FILE, \n SEED, TEST_PATIENT, N_EPOCH, TYPE = \"Normal\",\n SPLIT=\"train\"):\n \"\"\"\n Takes a DataGen object and creates an associated TFRecord file. \n We do not perform data augmentation on the fly but save the \n augmented images in the record. Most of the parameters here \n reference paramaters of the DataGen object. In particular, PATH,\n CROP, SIZE, TRANSFORM_LIST, UNET, SEED and TEST_PATIENT. \n OUTNAME is the name of the record.\n \"\"\"\n\n tfrecords_filename = OUTNAME\n writer = tf.io.TFRecordWriter(tfrecords_filename)\n\n \n if TYPE == \"Normal\":\n DG = DataGenRandomT(PATH, split=SPLIT, crop=CROP, size=SIZE,\n transforms=TRANSFORM_LIST, UNet=UNET, num=TEST_PATIENT,\n mean_file=MEAN_FILE, seed_=SEED)\n\n elif TYPE == \"3class\":\n DG = DataGen3(PATH, split=SPLIT, crop = CROP, size=SIZE, \n transforms=TRANSFORM_LIST, UNet=UNET, num=TEST_PATIENT,\n mean_file=MEAN_FILE, seed_=SEED)\n elif TYPE == \"ReducedClass\":\n DG = DataGen3reduce(PATH, split=SPLIT, crop = CROP, size=SIZE, \n transforms=TRANSFORM_LIST, UNet=UNET, num=TEST_PATIENT,\n mean_file=MEAN_FILE, seed_=SEED)\n elif TYPE == \"JUST_READ\":\n DG = DataGenMulti(PATH, split=SPLIT, crop = CROP, size=SIZE, \n transforms=TRANSFORM_LIST, UNet=UNET, num=TEST_PATIENT,\n mean_file=MEAN_FILE, seed_=SEED)\n\n DG.SetPatient(TEST_PATIENT)\n N_ITER_MAX = N_EPOCH * DG.length\n\n original_images = []\n key = DG.RandomKey(False)\n if not UNET:\n for _ in range(N_ITER_MAX):\n key = DG.NextKeyRandList(0)\n img, annotation = DG[key]\n # img = img.astype(np.uint8)\n annotation = annotation.astype(np.uint8)\n height = img.shape[0]\n width = img.shape[1]\n \n original_images.append((img, annotation))\n \n img_raw = img.tostring()\n annotation_raw = annotation.tostring()\n \n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(height),\n 'width': _int64_feature(width),\n 'image_raw': _bytes_feature(img_raw),\n 'mask_raw': _bytes_feature(annotation_raw)}))\n \n writer.write(example.SerializeToString())\n else:\n for _ in range(N_ITER_MAX):\n key = DG.NextKeyRandList(0)\n img, annotation = DG[key]\n # img = img.astype(np.uint8)\n annotation = annotation.astype(np.uint8)\n height_img = img.shape[0]\n width_img = img.shape[1]\n\n height_mask = annotation.shape[0]\n width_mask = annotation.shape[1]\n \n original_images.append((img, annotation))\n \n img_raw = img.tostring()\n annotation_raw = annotation.tostring()\n \n example = tf.train.Example(features=tf.train.Features(feature={\n 'height_img': _int64_feature(height_img),\n 'width_img': _int64_feature(width_img),\n 'height_mask': _int64_feature(height_mask),\n 'width_mask': _int64_feature(width_mask),\n 'image_raw': _bytes_feature(img_raw),\n 'mask_raw': _bytes_feature(annotation_raw)}))\n \n writer.write(example.SerializeToString())\n\n\n writer.close()\n"
] | [
[
"tensorflow.train.BytesList",
"tensorflow.train.Int64List",
"tensorflow.io.TFRecordWriter"
]
] |
shreyasvj25/turicreate | [
"dd210c2563930881abd51fd69cb73007955b33fd"
] | [
"src/unity/python/turicreate/test/test_graph.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright © 2017 Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can\n# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\nfrom __future__ import print_function as _\nfrom __future__ import division as _\nfrom __future__ import absolute_import as _\nfrom ..data_structures.sgraph import SGraph, Vertex, Edge, load_sgraph\nfrom ..data_structures.sframe import SFrame\nfrom . import util\n\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport unittest\nimport tempfile\nimport json\nimport os\n\nimport sys\nif sys.version_info.major > 2:\n unittest.TestCase.assertItemsEqual = unittest.TestCase.assertCountEqual\n\nclass GraphTests(unittest.TestCase):\n def setUp(self):\n self.vertices = pd.DataFrame({\n 'vid': ['1', '2', '3'],\n 'color': ['g', None, 'b'],\n 'vec': [[.1, .1, .1], [.1, .1, .1], [.1, .1, .1]]})\n self.edges = pd.DataFrame({\n 'src_id': ['1', '2', '3'],\n 'dst_id': ['2', '3', '4'],\n 'weight': [0., None, 1.]})\n\n def test_empty_graph(self):\n g = SGraph()\n self.assertEqual(g.summary(), {'num_vertices': 0, 'num_edges': 0})\n self.assertEqual(len(g.get_fields()), 3)\n self.assertTrue(g.get_vertices(format='sframe').shape, (0, 1))\n self.assertTrue(g.get_edges(format='sframe').shape, (0, 2))\n self.assertTrue(g.vertices.shape, (0, 1))\n self.assertTrue(g.edges.shape, (0, 2))\n self.assertTrue(len(g.get_vertices(format='list')) == 0)\n self.assertTrue(len(g.get_edges(format='list')) == 0)\n\n def test_graph_constructor(self):\n g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n g2 = SGraph(g.vertices, g.edges)\n g3 = SGraph(g.vertices, g.edges, src_field=\"__dst_id\", dst_field=\"__src_id\") #flip around src and dst\n assert_frame_equal(g.vertices.to_dataframe().sort_values('__id').reset_index(drop=True),\n g2.vertices.to_dataframe().sort_values('__id').reset_index(drop=True))\n assert_frame_equal(g.edges.to_dataframe().sort_values(['__src_id', '__dst_id']).reset_index(drop=True),\n g2.edges.to_dataframe().sort_values(['__src_id', '__dst_id']).reset_index(drop=True))\n self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges)))\n self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges), 'vid', '__src_id', '__dst_id'))\n self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges), vid_field=None, src_field='src_id', dst_field='dst_id'))\n\n def test_simple_graph(self):\n for input_type in [pd.DataFrame, SFrame, list]:\n g = SGraph()\n if input_type is list:\n vertices = [Vertex(x[1]['vid'], {'color': x[1]['color'], 'vec': x[1]['vec']}) for x in self.vertices.iterrows()]\n edges = [Edge(x[1]['src_id'], x[1]['dst_id'], {'weight': x[1]['weight']}) for x in self.edges.iterrows()]\n g = g.add_vertices(vertices)\n g = g.add_edges(edges)\n else:\n g = g.add_vertices(input_type(self.vertices), vid_field='vid')\n g = g.add_edges(input_type(self.edges), src_field='src_id', dst_field='dst_id')\n self.assertEqual(g.summary(), {'num_vertices': 4, 'num_edges': 3})\n self.assertItemsEqual(g.get_fields(), ['__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'])\n self.assertItemsEqual(g.get_vertices(format='dataframe').columns.values, ['color', 'vec'])\n self.assertItemsEqual(g.get_edges(format='dataframe').columns.values, ['__src_id', '__dst_id', 'weight'])\n 
self.assertTrue(g.get_edges(format='dataframe').shape, (3, 3))\n self.assertTrue(g.get_vertices(format='dataframe').shape, (4, 3))\n self.assertTrue(g.get_vertices(format='dataframe', fields={'color': 'g'}).shape, (1, 2))\n self.assertTrue(g.get_edges(format='dataframe', fields={'weight': 0.}).shape, (1, 3))\n\n self.assertItemsEqual(g.get_vertices(format='sframe').column_names(), ['__id', 'color', 'vec'])\n self.assertItemsEqual(g.get_edges(format='sframe').column_names(), ['__src_id', '__dst_id', 'weight'])\n self.assertTrue(g.get_edges(format='sframe').shape, (3, 3))\n self.assertTrue(g.get_vertices(format='sframe').shape, (4, 3))\n self.assertTrue(g.get_vertices(format='sframe', fields={'color': 'g'}).shape, (1, 2))\n self.assertTrue(g.get_edges(format='sframe', fields={'weight': 0.}).shape, (1, 3))\n\n vertices = g.get_vertices(format='list')\n edges = g.get_edges(format='list')\n self.assertEqual(len(vertices), 4)\n self.assertEqual(len(edges), 3)\n\n # get edges is lazy\n edges = g.get_edges()\n self.assertFalse(edges.__is_materialized__())\n\n def test_vertex_query(self):\n df = pd.DataFrame({'src': ['a', 'c', 'b', 'd', 'c', 'e', 'g', 'f'],\n 'dst': ['b', 'b', 'd', 'c', 'e', 'g', 'f', 'e']})\n g = SGraph().add_edges(df, src_field='src', dst_field='dst')\n\n # basic check\n g2 = g.get_neighborhood(ids=['b'], radius=1, full_subgraph=False)\n out = g2.get_edges(format='dataframe')\n out.sort_values(by=['__src_id', '__dst_id'], axis=0, inplace=True)\n out.index = range(len(out))\n\n correct = pd.DataFrame.from_records([('b', 'd'),\n ('a', 'b'),\n ('c', 'b')],\n columns=['__src_id', '__dst_id'])\n correct.sort_values(by=['__src_id', '__dst_id'], axis=0, inplace=True)\n correct.index = range(len(correct))\n assert_frame_equal(out, correct, check_dtype=False)\n\n # check larger radius, full subgraph, and multiple vertices\n g2 = g.get_neighborhood(ids=['a', 'g'], radius=2, full_subgraph=True)\n out = g2.get_edges(format='dataframe')\n out.sort_values(by=['__src_id', '__dst_id'], axis=0, inplace=True)\n out.index = range(len(out))\n\n correct = pd.DataFrame.from_records([('a', 'b'),\n ('b', 'd'),\n ('c', 'b'),\n ('c', 'e'),\n ('d', 'c'),\n ('e', 'g'),\n ('f', 'e'),\n ('g', 'f')],\n columns=['__src_id', '__dst_id'])\n correct.sort_values(by=['__src_id', '__dst_id'], axis=0, inplace=True)\n correct.index = range(len(correct))\n assert_frame_equal(out, correct, check_dtype=False)\n\n def test_select_query(self):\n g = SGraph()\n g = g.add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n g2 = g.select_fields([\"color\", \"weight\"])\n self.assertSequenceEqual((g2.get_fields()), ['__id', 'color', '__src_id', '__dst_id', 'weight'])\n g2 = g.select_fields([\"color\"])\n self.assertSequenceEqual((g2.get_fields()), ['__id', 'color', '__src_id', '__dst_id'])\n del g.edges['weight']\n del g.vertices['vec']\n g.vertices['color2'] = g.vertices['color']\n self.assertSequenceEqual((g.get_fields()), ['__id', 'color', 'color2', '__src_id', '__dst_id'])\n g2 = g.select_fields([])\n self.assertSequenceEqual((g2.get_fields()), ['__id', '__src_id', '__dst_id'])\n\n def test_select_query_with_same_vertex_edge_field(self):\n vertices = SFrame({'__id': range(10)})\n edges = SFrame({'__src_id': range(10), '__dst_id': range(1, 11)})\n g = SGraph(vertices, edges)\n g.vertices['weight'] = 0\n g.vertices['v'] = 0\n g.edges['weight'] = 0\n g.edges['e'] = 0\n self.assertItemsEqual(g.get_fields(), ['v', 'e', 'weight', 'weight', '__id', '__src_id', '__dst_id'])\n g2 = g.select_fields('weight')\n 
self.assertItemsEqual(g2.get_fields(), ['weight', 'weight', '__id', '__src_id', '__dst_id'])\n\n def test_save_load(self):\n g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n with util.TempDirectory() as f:\n g.save(f)\n g2 = load_sgraph(f, 'binary')\n self.assertEqual(g2.summary(), {'num_vertices': 4, 'num_edges': 3})\n self.assertItemsEqual(g2.get_fields(), {'__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'})\n\n with util.TempDirectory() as f:\n g.save(f, format='csv')\n vertices = SFrame.read_csv(f + \"/vertices.csv\")\n edges = SFrame.read_csv(f + \"/edges.csv\")\n g2 = SGraph().add_edges(edges, '__src_id', '__dst_id').add_vertices(vertices, '__id')\n self.assertEqual(g2.summary(), {'num_vertices': 4, 'num_edges': 3})\n self.assertItemsEqual(g2.get_fields(), {'__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'})\n\n temp_fn = None\n # The delete=False is for Windows sake\n with tempfile.NamedTemporaryFile(suffix='.json', delete=False) as f:\n temp_fn = f.name\n g.save(f.name)\n with open(f.name, 'r') as f2:\n data = f2.read()\n g2 = json.loads(data)\n self.assertTrue(\"vertices\" in g2)\n self.assertTrue(\"edges\" in g2)\n if os.path.exists(temp_fn):\n os.remove(temp_fn)\n\n def test_load_graph_from_text(self):\n toy_graph_snap = \"\"\"#some comment string\n #some more comment string\n 1\\t2\n 1\\t3\n 2\\t3\n 2\\t1\n 3\\t1\n 3\\t2\"\"\"\n\n toy_graph_tsv = \"\"\"1\\t2\n 1\\t3\n 2\\t3\n 2\\t1\n 3\\t1\n 3\\t2\"\"\"\n toy_graph_csv = \"\"\"1,2\n 1,3\n 2,3\n 2,1\n 3,1\n 3,2\"\"\"\n\n temp_fnames = []\n with tempfile.NamedTemporaryFile(mode=\"w\", delete=False) as fsnap, tempfile.NamedTemporaryFile(mode=\"w\", delete=False) as ftsv, tempfile.NamedTemporaryFile(mode=\"w\", delete=False) as fcsv:\n fsnap.write(toy_graph_snap)\n fsnap.file.flush()\n ftsv.write(toy_graph_tsv)\n ftsv.file.flush()\n fcsv.write(toy_graph_csv)\n fcsv.file.flush()\n for (fname, fmt) in zip([fsnap.name, ftsv.name, fcsv.name], ['snap', 'tsv', 'csv']):\n g = load_sgraph(fname, fmt)\n self.assertEqual(g.summary(), {'num_vertices': 3, 'num_edges': 6})\n temp_fnames.append(fname)\n\n for name in temp_fnames:\n if os.path.exists(name):\n os.remove(name)\n\n def test_robust_parse(self):\n df = pd.DataFrame({'int': [1, 2, 3],\n 'float': [1., 2., 3.],\n 'str': ['one', 'two', 'three'],\n 'nan': [np.nan, np.nan, np.nan],\n 'sparse_int': [1, 2, np.nan],\n 'sparse_float': [np.nan, 2., 3.],\n 'sparse_str': [None, 'two', None]\n })\n g = SGraph().add_vertices(df)\n self.assertItemsEqual(g.get_fields(), df.columns.tolist() + ['__id', '__src_id', '__dst_id'])\n\n df2 = g.get_vertices(format='dataframe')\n sf = g.get_vertices(format='sframe')\n for col in df.columns:\n # potential bug: df2 is missing the 'nan' column.\n if (col != 'nan'):\n self.assertItemsEqual(sorted(list(df2[col].dropna())), sorted(list(df[col].dropna())))\n self.assertItemsEqual(sorted(list(sf[col].dropna())), sorted(list(df[col].dropna())))\n\n def test_missing_value_vids(self):\n vertices = SFrame()\n vertices['vid'] = [1, 2, 3, None]\n edges = SFrame()\n edges['src'] = [1, 2, 3, None]\n edges['dst'] = [4, 4, 4, 4]\n self.assertRaises(RuntimeError, lambda : SGraph().add_vertices(vertices, 'vid').summary())\n self.assertRaises(RuntimeError, lambda : SGraph().add_edges(edges, 'src', 'dst').summary())\n self.assertRaises(RuntimeError, lambda : SGraph().add_edges(edges, 'dst', 'src').summary())\n\n def test_gframe(self):\n g = SGraph()\n v = g.vertices\n self.assertSequenceEqual(v.column_names(), ['__id'])\n e = 
g.edges\n self.assertSequenceEqual(e.column_names(), ['__src_id', '__dst_id'])\n\n # Test vertices and edge attributes cannot be modified\n def set_vertices_empty(g):\n g.vertices = SFrame()\n\n def set_edges_empty(g):\n g.edges = SFrame()\n\n def remove_vertices(g):\n del g.vertices\n\n def remove_edges(g):\n del g.edges\n\n def remove_edge_column(gf, name):\n del gf[name]\n\n self.assertRaises(AttributeError, lambda: remove_vertices(g))\n self.assertRaises(AttributeError, lambda: remove_edges(g))\n self.assertRaises(AttributeError, lambda: set_vertices_empty(g))\n self.assertRaises(AttributeError, lambda: set_edges_empty(g))\n\n # Test gframe operations has the same effect as its sframe+graph equivalent\n g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n v = g.vertices\n v['id_col'] = v['__id']\n e = g.edges\n e['src_id_col'] = e['__src_id']\n e['dst_id_col'] = e['__dst_id']\n g2 = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')\n new_vdata = g2.get_vertices()\n new_vdata['id_col'] = new_vdata['__id']\n new_edata = g2.get_edges()\n new_edata['src_id_col'] = new_edata['__src_id']\n new_edata['dst_id_col'] = new_edata['__dst_id']\n g2 = SGraph().add_vertices(new_vdata, '__id').add_edges(new_edata, '__src_id', '__dst_id')\n assert_frame_equal(g.get_vertices().to_dataframe().sort_values('__id').reset_index(drop=True),\n g2.get_vertices().to_dataframe().sort_values('__id').reset_index(drop=True))\n assert_frame_equal(g.get_edges().to_dataframe().sort_values(['__src_id', '__dst_id']).reset_index(drop=True),\n g2.get_edges().to_dataframe().sort_values(['__src_id', '__dst_id']).reset_index(drop=True))\n\n # check delete a column with exception, and edges is still in a valid state\n self.assertRaises(KeyError, lambda: remove_edge_column(g.edges, 'badcolumn'))\n g.edges.head()\n\n # test slicing\n assert_frame_equal(g.edges[:3].to_dataframe(), g.get_edges()[:3].to_dataframe())\n assert_frame_equal(g.vertices[:3].to_dataframe(), g.get_vertices()[:3].to_dataframe())\n\n # test add row number\n e_expected = g.get_edges().to_dataframe()\n v_expected = g.get_vertices().to_dataframe()\n e_expected['id'] = range(len(e_expected))\n v_expected['id'] = range(len(v_expected))\n\n def test_sframe_le_append_skip_row_bug_is_fixed(self):\n \"\"\"\n This test is actually for SFrame lazy evaluation.\n The reason it is here is because the repro can only be done in SGraph.\n\n The bug appears when the SFrame has lazy_append and when passing through\n the logical filter, skip_rows is not done correctly. So the edge_sframe\n is in a bad state when not materialized.\n\n This unit test stays here to ensure the bug is fixed until we can find\n a more clean repro.\n \"\"\"\n n = 12 # smallest n to repro the le_append bug\n\n # A graph with edge i -> i + 1\n g = SGraph().add_edges(SFrame({'src': range(n), 'dst': range(1, n + 1)}), 'src', 'dst')\n\n lazy_sf = g.get_edges()\n materialized_sf = g.get_edges()\n materialized_sf.__materialize__()\n assert_frame_equal(lazy_sf[lazy_sf['__dst_id'] == n].to_dataframe(), materialized_sf[materialized_sf['__dst_id'] == n].to_dataframe())\n"
] | [
[
"pandas.DataFrame.from_records",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal"
]
] |
anuragreddygv323/raster-vision | [
"14a6495f23bbef0bf7f7c47fb37b856a559b272f"
] | [
"src/rastervision/semseg/tasks/utils.py"
] | [
"\"\"\"Utility functions shared across tasks.\"\"\"\nimport numpy as np\nimport matplotlib as mpl\n# For headless environments\nmpl.use('Agg') # NOQA\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\nfrom rastervision.common.utils import plot_img_row\n\n\ndef predict_x(x, model):\n batch_x = np.expand_dims(x, axis=0)\n batch_y = model.predict(batch_x)\n y = np.squeeze(batch_y, axis=0)\n return y\n\n\ndef make_prediction_img(x, target_size, predict):\n \"\"\"Generate a prediction image one window at a time.\n\n Generate a prediction image consisting of a prediction for each pixel. The\n format of that prediction depends on the output of the predict function.\n Passing a very large image as input to a model might\n not be possible due to memory limitations. Instead, we slide a window over\n the image and get the predictions for each window. The individual\n predictions can be combined to create a large prediction image. By\n overlapping the windows, we can discard inaccurate predictions along window\n boundaries.\n\n # Arguments\n x: the full sized image to get a prediction for\n (nb_rows, nb_cols, nb_channels)\n target_size: the window size which needs to be the same as what the\n model expects as input\n predict: a function that takes a window image of size\n target_size and returns the prediction for each pixel\n\n # Returns\n The prediction image\n \"\"\"\n quarter_target_size = target_size // 4\n half_target_size = target_size // 2\n sample_prediction = predict(x[0:target_size, 0:target_size, :])\n nb_channels = sample_prediction.shape[2]\n dtype = sample_prediction.dtype\n\n pad_width = (\n (quarter_target_size, target_size),\n (quarter_target_size, target_size),\n (0, 0))\n\n pad_x = np.pad(x, pad_width, 'edge')\n pad_y = np.zeros(\n (pad_x.shape[0], pad_x.shape[1], nb_channels),\n dtype=dtype)\n\n def update_prediction_center(row_begin, row_end, col_begin, col_end):\n \"\"\"Just update the center half of the window.\"\"\"\n\n x_window = pad_x[row_begin:row_end, col_begin:col_end, :]\n y_window = predict(x_window)\n\n y_window_center = y_window[\n quarter_target_size:target_size - quarter_target_size,\n quarter_target_size:target_size - quarter_target_size,\n :]\n\n pad_y[\n row_begin + quarter_target_size:row_end - quarter_target_size,\n col_begin + quarter_target_size:col_end - quarter_target_size,\n :] = y_window_center\n\n for row_begin in range(0, pad_x.shape[0], half_target_size):\n for col_begin in range(0, pad_x.shape[1], half_target_size):\n row_end = row_begin + target_size\n col_end = col_begin + target_size\n if row_end <= pad_x.shape[0] and col_end <= pad_x.shape[1]:\n update_prediction_center(\n row_begin, row_end, col_begin, col_end)\n\n y = pad_y[quarter_target_size:quarter_target_size+x.shape[0],\n quarter_target_size:quarter_target_size+x.shape[1],\n :]\n return y\n\n\ndef make_legend(label_keys, label_names):\n patches = []\n for label_key, label_name in zip(label_keys, label_names):\n color = tuple(np.array(label_key) / 255.)\n patch = mpatches.Patch(\n facecolor=color, edgecolor='black', linewidth=0.5,\n label=label_name)\n patches.append(patch)\n plt.legend(handles=patches, loc='upper left',\n bbox_to_anchor=(1, 1), fontsize=4)\n\n\ndef plot_prediction(generator, all_x, y, pred,\n file_path, is_debug=False):\n dataset = generator.dataset\n fig = plt.figure()\n\n nb_subplot_cols = 3\n if is_debug:\n nb_subplot_cols += len(generator.active_input_inds)\n\n grid_spec = mpl.gridspec.GridSpec(1, nb_subplot_cols)\n\n all_x = 
generator.calibrate_image(all_x)\n rgb_input_im = all_x[:, :, dataset.rgb_inds]\n imgs = [rgb_input_im]\n titles = ['RGB']\n\n if is_debug:\n ir_im = all_x[:, :, dataset.ir_ind]\n imgs.append(ir_im)\n titles.append('IR')\n\n depth_im = all_x[:, :, dataset.depth_ind]\n imgs.append(depth_im)\n titles.append('Depth')\n\n ndvi_im = all_x[:, :, dataset.ndvi_ind]\n imgs.append(ndvi_im)\n titles.append('NDVI')\n\n imgs.append(y)\n titles.append('Ground Truth')\n\n imgs.append(pred)\n titles.append('Prediction')\n\n plot_img_row(fig, grid_spec, 0, imgs, titles)\n make_legend(dataset.label_keys, dataset.label_names)\n plt.savefig(file_path, bbox_inches='tight', format='png', dpi=300)\n\n plt.close(fig)\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.squeeze",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.expand_dims",
"matplotlib.pyplot.close",
"matplotlib.use",
"numpy.pad",
"matplotlib.gridspec.GridSpec",
"numpy.array",
"matplotlib.patches.Patch"
]
] |
yuyunli2/faster_rcnn | [
"c8ddaa02fdc8ca36438713f2584d83dbbfae9ed9"
] | [
"vis_tool.py"
] | [
"import time\n\nimport numpy as np\nimport matplotlib\nimport torch as t\nimport visdom\n\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plot\n\n\nVOC_BBOX_LABEL_NAMES = (\n 'fly',\n 'bike',\n 'bird',\n 'boat',\n 'pin',\n 'bus',\n 'c',\n 'cat',\n 'chair',\n 'cow',\n 'table',\n 'dog',\n 'horse',\n 'moto',\n 'p',\n 'plant',\n 'shep',\n 'sofa',\n 'train',\n 'tv',\n)\n\n\ndef vis_image(img, ax=None):\n if(ax is None):\n fig = plot.figure()\n ax = fig.add_subplot(1, 1, 1)\n img = img.transpose((1, 2, 0))\n ax.imshow(img.astype(np.uint8))\n\n return ax\n\n\ndef vis_bbox(img, bbox, label=None, score=None, ax=None):\n label_names = list(VOC_BBOX_LABEL_NAMES) + ['bg']\n\n if label is not None and not len(bbox) == len(label):\n raise ValueError('The length of label must be same as that of bbox')\n if score is not None and not len(bbox) == len(score):\n raise ValueError('The length of score must be same as that of bbox')\n\n ax = vis_image(img, ax=ax)\n\n if(len(bbox) == 0):\n return ax\n\n for i, bb in enumerate(bbox):\n xy = (bb[1], bb[0])\n height = bb[2] - bb[0]\n width = bb[3] - bb[1]\n ax.add_patch(plot.Rectangle(\n xy, width, height, fill=False, edgecolor='red', linewidth=2))\n\n caption = list()\n\n if(label is not None and label_names is not None):\n lb = label[i]\n if(not (-1 <= lb < len(label_names))):\n raise ValueError('No corresponding name is given')\n caption.append(label_names[lb])\n if(score is not None):\n sc = score[i]\n caption.append('{:.2f}'.format(sc))\n\n if(len(caption) > 0):\n ax.text(bb[1], bb[0], ': '.join(caption), style='italic', bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 0})\n \n return ax\n\n\ndef fig2data(fig):\n fig.canvas.draw()\n\n w, h = fig.canvas.get_width_height()\n buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)\n buf.shape = (w, h, 4)\n\n buf = np.roll(buf, 3, axis=2)\n return buf.reshape(h, w, 4)\n\n\ndef fig4vis(fig):\n ax = fig.get_figure()\n img_data = fig2data(ax).astype(np.int32)\n plot.close()\n\n return img_data[:, :, :3].transpose((2, 0, 1)) / 255.\n\n\ndef visdom_bbox(*args, **kwargs):\n fig = vis_bbox(*args, **kwargs)\n data = fig4vis(fig)\n return data\n\n\nclass Visualizer(object):\n def __init__(self, env='default', **kwargs):\n self.vis = visdom.Visdom(env=env, use_incoming_socket=False, **kwargs)\n self._vis_kw = kwargs\n self.index = {}\n self.log_text = ''\n\n def reinit(self, env='default', **kwargs):\n self.vis = visdom.Visdom(env=env, **kwargs)\n return self\n\n def plot_many(self, d):\n for k, v in d.items():\n if v is not None:\n self.plot(k, v)\n\n def img_many(self, d):\n for k, v in d.items():\n self.img(k, v)\n\n def plot(self, name, y, **kwargs):\n x = self.index.get(name, 0)\n self.vis.line(Y=np.array([y]), X=np.array([x]), win=name, opts=dict(title=name), update=None if x == 0 else 'append', **kwargs)\n self.index[name] = x + 1\n\n def img(self, name, img_, **kwargs):\n self.vis.images(t.Tensor(img_).cpu().numpy(), win=name, opts=dict(title=name), **kwargs)\n\n def log(self, info, win='log_text'):\n self.log_text += ('[{time}] {info} <br>'.format(time=time.strftime('%m%d_%H%M%S'), info=info))\n self.vis.text(self.log_text, win)\n\n def __getattr__(self, name):\n return getattr(self.vis, name)\n\n def state_dict(self):\n return {'index': self.index, 'vis_kw': self._vis_kw, 'log_text': self.log_text,'env': self.vis.env}\n\n def load_state_dict(self, d):\n self.vis = visdom.Visdom(env=d.get('env', self.vis.env), **(self.d.get('vis_kw')))\n self.log_text = d.get('log_text', '')\n self.index = 
d.get('index', dict())\n \n return self"
] | [
[
"numpy.roll",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.close",
"matplotlib.use",
"matplotlib.pyplot.Rectangle",
"torch.Tensor"
]
] |
deeptimittal12/python-neo | [
"7409f47b5debd4d2a75bbf0e77ac10562446c97a"
] | [
"neo/io/brainwaredamio.py"
] | [
"'''\nClass for reading from Brainware DAM files\n\nDAM files are binary files for holding raw data. They are broken up into\nsequence of Segments, each containing a single raw trace and parameters.\n\nThe DAM file does NOT contain a sampling rate, nor can it be reliably\ncalculated from any of the parameters. You can calculate it from\nthe \"sweep length\" attribute if it is present, but it isn't always present.\nIt is more reliable to get it from the corresponding SRC file or F32 file if\nyou have one.\n\nThe DAM file also does not divide up data into Blocks, so only a single\nBlock is returned..\n\nBrainware was developed by Dr. Jan Schnupp and is availabe from\nTucker Davis Technologies, Inc.\nhttp://www.tdt.com/downloads.htm\n\nNeither Dr. Jan Schnupp nor Tucker Davis Technologies, Inc. had any part in the\ndevelopment of this code\n\nThe code is implemented with the permission of Dr. Jan Schnupp\n\nAuthor: Todd Jennings\n'''\n\n# import needed core python modules\nimport os\nimport os.path\n\n# numpy and quantities are already required by neo\nimport numpy as np\nimport quantities as pq\n\n# needed core neo modules\nfrom neo.core import (AnalogSignal, Block,\n ChannelIndex, Segment)\n\n# need to subclass BaseIO\nfrom neo.io.baseio import BaseIO\n\n\nclass BrainwareDamIO(BaseIO):\n \"\"\"\n Class for reading Brainware raw data files with the extension '.dam'.\n\n The read_block method returns the first Block of the file. It will\n automatically close the file after reading.\n The read method is the same as read_block.\n\n Note:\n\n The file format does not contain a sampling rate. The sampling rate\n is set to 1 Hz, but this is arbitrary. If you have a corresponding .src\n or .f32 file, you can get the sampling rate from that. It may also be\n possible to infer it from the attributes, such as \"sweep length\", if\n present.\n\n Usage:\n >>> from neo.io.brainwaredamio import BrainwareDamIO\n >>> damfile = BrainwareDamIO(filename='multi_500ms_mulitrep_ch1.dam')\n >>> blk1 = damfile.read()\n >>> blk2 = damfile.read_block()\n >>> print blk1.segments\n >>> print blk1.segments[0].analogsignals\n >>> print blk1.units\n >>> print blk1.units[0].name\n >>> print blk2\n >>> print blk2[0].segments\n \"\"\"\n\n is_readable = True # This class can only read data\n is_writable = False # write is not supported\n\n # This class is able to directly or indirectly handle the following objects\n # You can notice that this greatly simplifies the full Neo object hierarchy\n supported_objects = [Block, ChannelIndex,\n Segment, AnalogSignal]\n\n readable_objects = [Block]\n writeable_objects = []\n\n has_header = False\n is_streameable = False\n\n # This is for GUI stuff: a definition for parameters when reading.\n # This dict should be keyed by object (`Block`). Each entry is a list\n # of tuple. The first entry in each tuple is the parameter name. 
The\n # second entry is a dict with keys 'value' (for default value),\n # and 'label' (for a descriptive name).\n # Note that if the highest-level object requires parameters,\n # common_io_test will be skipped.\n read_params = {Block: []}\n\n # do not support write so no GUI stuff\n write_params = None\n name = 'Brainware DAM File'\n extensions = ['dam']\n\n mode = 'file'\n\n def __init__(self, filename=None):\n '''\n Arguments:\n filename: the filename\n '''\n BaseIO.__init__(self)\n self._path = filename\n self._filename = os.path.basename(filename)\n self._fsrc = None\n\n def read(self, lazy=False, **kargs):\n '''\n Reads raw data file \"fname\" generated with BrainWare\n '''\n assert not lazy, 'Do not support lazy'\n return self.read_block(lazy=lazy)\n\n def read_block(self, lazy=False, **kargs):\n '''\n Reads a block from the raw data file \"fname\" generated\n with BrainWare\n '''\n assert not lazy, 'Do not support lazy'\n\n # there are no keyargs implemented to so far. If someone tries to pass\n # them they are expecting them to do something or making a mistake,\n # neither of which should pass silently\n if kargs:\n raise NotImplementedError('This method does not have any '\n 'arguments implemented yet')\n self._fsrc = None\n\n block = Block(file_origin=self._filename)\n\n # create the objects to store other objects\n chx = ChannelIndex(file_origin=self._filename,\n channel_ids=np.array([1]),\n index=np.array([0]),\n channel_names=np.array(['Chan1'], dtype='S'))\n\n # load objects into their containers\n block.channel_indexes.append(chx)\n\n # open the file\n with open(self._path, 'rb') as fobject:\n # while the file is not done keep reading segments\n while True:\n seg = self._read_segment(fobject)\n # if there are no more Segments, stop\n if not seg:\n break\n\n # store the segment and signals\n seg.analogsignals[0].channel_index = chx\n block.segments.append(seg)\n\n # remove the file object\n self._fsrc = None\n\n block.create_many_to_one_relationship()\n return block\n\n # -------------------------------------------------------------------------\n # -------------------------------------------------------------------------\n # IMPORTANT!!!\n # These are private methods implementing the internal reading mechanism.\n # Due to the way BrainWare DAM files are structured, they CANNOT be used\n # on their own. 
Calling these manually will almost certainly alter your\n # position in the file in an unrecoverable manner, whether they throw\n # an exception or not.\n # -------------------------------------------------------------------------\n # -------------------------------------------------------------------------\n\n def _read_segment(self, fobject):\n '''\n Read a single segment with a single analogsignal\n\n Returns the segment or None if there are no more segments\n '''\n\n try:\n # float64 -- start time of the AnalogSignal\n t_start = np.fromfile(fobject, dtype=np.float64, count=1)[0]\n except IndexError:\n # if there are no more Segments, return\n return False\n\n # int16 -- index of the stimulus parameters\n seg_index = np.fromfile(fobject, dtype=np.int16, count=1)[0].tolist()\n\n # int16 -- number of stimulus parameters\n numelements = np.fromfile(fobject, dtype=np.int16, count=1)[0]\n\n # read the name strings for the stimulus parameters\n paramnames = []\n for _ in range(numelements):\n # unit8 -- the number of characters in the string\n numchars = np.fromfile(fobject, dtype=np.uint8, count=1)[0]\n\n # char * numchars -- a single name string\n name = np.fromfile(fobject, dtype=np.uint8, count=numchars)\n\n # exclude invalid characters\n name = str(name[name >= 32].view('c').tostring())\n\n # add the name to the list of names\n paramnames.append(name)\n\n # float32 * numelements -- the values for the stimulus parameters\n paramvalues = np.fromfile(fobject, dtype=np.float32, count=numelements)\n\n # combine parameter names and the parameters as a dict\n params = dict(zip(paramnames, paramvalues))\n\n # int32 -- the number elements in the AnalogSignal\n numpts = np.fromfile(fobject, dtype=np.int32, count=1)[0]\n\n # int16 * numpts -- the AnalogSignal itself\n signal = np.fromfile(fobject, dtype=np.int16, count=numpts)\n\n sig = AnalogSignal(signal.astype(np.float) * pq.mV,\n t_start=t_start * pq.d,\n file_origin=self._filename,\n sampling_period=1. * pq.s,\n copy=False)\n # Note: setting the sampling_period to 1 s is arbitrary\n\n # load the AnalogSignal and parameters into a new Segment\n seg = Segment(file_origin=self._filename,\n index=seg_index,\n **params)\n seg.analogsignals = [sig]\n\n return seg\n"
] | [
[
"numpy.array",
"numpy.fromfile"
]
] |
LeonardoSirino/FuzzyTableExtractor | [
"114f5b2b1c65bfcaa84cb75c876b68ce1974c821"
] | [
"fuzzy_table_extractor/extractor.py"
] | [
"from collections import deque\nfrom dataclasses import dataclass\nfrom enum import Enum, auto\nfrom typing import Callable, Iterable, List\n\nimport numpy as np\nimport pandas as pd\n\nfrom .handlers.base_handler import BaseHandler, BaseNode, TreeFileHandler\nfrom .util import match_regex_list, str_comparison\n\n\n@dataclass\nclass TableMatch:\n search_term: str\n original_term: str\n score: str\n\n\nclass FieldOrientation(Enum):\n ROW = auto()\n COLUMN = auto()\n\n\nclass Extractor:\n \"\"\"The Extractor class has the functions to extract data from tables in document.\n It receives a document handler on initialization, this handler follows an interface, so the extraction is agnostic of the document type.\n \"\"\"\n\n def __init__(self, doc_handler: BaseHandler) -> None:\n \"\"\"Initialize the extractor with a document handler\n\n Args:\n doc_handler (BaseHandler): document handler to extract data from\n \"\"\"\n self.doc_handler = doc_handler\n\n def extract_closest_table(\n self,\n search_headers: List[str],\n validation_funtion: Callable[[List[str]], bool] = lambda x: True,\n minimum_proximity_ratio: float = 0,\n ) -> pd.DataFrame:\n \"\"\"Extract the table in document that has the closest header to search_headers\n\n Args:\n search_headers (List[str]): list of itens to search in header.\n validation_funtion (Callable[[List[str]], bool], optional): function to validate if the table is valid. This function receives the table header as argument and must return True if the table is valid. Defaults to lambda x: True.\n minimum_proximity_ratio (float, optional): minimum proximity ratio to consider there is a match in header. Value must be between 0 and 100. Defaults to 0.\n\n Returns:\n pd.DataFrame: best match\n \"\"\"\n if minimum_proximity_ratio < 0 or minimum_proximity_ratio > 100:\n raise ValueError(\"minimum_proximity_ratio must be between 0 and 100\")\n\n tables = self.doc_handler.tables\n ratios = []\n for df in tables:\n if validation_funtion(df.columns.to_list()):\n ratio = self.headers_proximity_ratio(\n document_headers=df.columns.to_list(), search_headers=search_headers\n )\n\n ratios.append(ratio)\n else:\n ratios.append(0)\n\n if len(ratios) == 0:\n return pd.DataFrame()\n\n best_ratio = np.max(ratios)\n if best_ratio < minimum_proximity_ratio:\n return pd.DataFrame()\n\n best_match = tables[np.argmax(ratios)]\n\n df = self.get_columns_fuzzy(best_match, search_headers)\n\n return df\n\n def extract_single_field(\n self,\n field: str,\n orientation: FieldOrientation,\n regex: List[str] = [\"\"],\n title_regex: List[str] = [\"\"],\n return_multiple: bool = False,\n ) -> str:\n \"\"\"Extract single field of a word document based on a input string.\n The data will be extracted from tables in document\n\n Args:\n field (str): search field\n orientation (FieldOrientation): orientation to search the content of field\n regex (List[str], optional): list of regex to apply to content. To be a valid content there must be at least one match of regex in list. Defaults to [''].\n title_regex (List[str], optional): list of regex to apply to title. To be a valid title there must be at least one match of regex in list. Defaults to [''].\n return_multiple (bool, optional): if True, will return all matches that has the same proximity ratio. 
Defaults to False.\n\n Returns:\n str: best match\n \"\"\"\n df = self.doc_handler.dictionary\n\n df = df[df[\"orientation\"] == orientation.name.lower()]\n df = df[df[\"content\"].apply(lambda x: match_regex_list(x, regex))]\n df = df[df[\"title\"].apply(lambda x: match_regex_list(x, title_regex))]\n\n if df.empty:\n return \"\"\n\n df[\"ratio\"] = df[\"title\"].apply(lambda x: str_comparison(x, field))\n df.sort_values(by=\"ratio\", inplace=True, ascending=False)\n\n try:\n if return_multiple:\n max_ratio = df[\"ratio\"].max()\n values = df[df[\"ratio\"] == max_ratio][\"content\"].to_list()\n best_match = \", \".join(values)\n else:\n best_match = df[\"content\"].values[0]\n except IndexError:\n best_match = \"\"\n\n return best_match\n\n @staticmethod\n def headers_proximity_ratio(\n document_headers: List[str], search_headers: List[str]\n ) -> int:\n \"\"\"Calculates a proximity ratio of two headers\n\n Args:\n document_headers (List[str]): headers in document\n search_headers (List[str]): search headers\n\n Returns:\n int: proximity ratio\n \"\"\"\n matches = Extractor.headers_association(document_headers, search_headers)\n\n if len(matches) == 0:\n return 0\n\n scores = [x.score for x in matches]\n\n return min(scores)\n\n @staticmethod\n def headers_association(\n document_headers: List[str], search_headers: List[str]\n ) -> List[TableMatch]:\n # TODO I think this can be improved\n \"\"\"Determine the best association of two headers\n\n Args:\n document_headers (List[str]): headers in document\n search_headers (List[str]): search headers\n\n Returns:\n List[TableMatch]: list of table headers matches\n \"\"\"\n if len(search_headers) > len(document_headers):\n return []\n\n matches = []\n\n for s_header in search_headers:\n scores = [str_comparison(x, s_header) for x in document_headers]\n\n max_index = np.argmax(scores)\n max_score = np.max(scores)\n\n entry = TableMatch(\n search_term=s_header,\n original_term=document_headers[max_index],\n score=max_score,\n )\n\n matches.append(entry)\n\n document_headers.pop(max_index)\n\n return matches\n\n @staticmethod\n def get_columns_fuzzy(\n df: pd.DataFrame, columns: List[str], threshold=0\n ) -> pd.DataFrame:\n \"\"\"Get columns that hat the closest match with supplied columns names\n The columns will be renamed to match the closest column name\n\n Args:\n df (pd.DataFrame): dataframe to search columns\n columns (List[str]): columns to search\n threshold (int, optional): minimum score to consider a match. 
Defaults to 0.\n\n Returns:\n List[str]: columns that match\n \"\"\"\n association = Extractor.headers_association(df.columns.to_list(), columns)\n\n association = [x for x in association if x.score > threshold]\n\n original = [x.original_term for x in association]\n df = df[original]\n\n rename_dict = {x.original_term: x.search_term for x in association}\n df.rename(columns=rename_dict, inplace=True)\n\n return df\n\n\n@dataclass\nclass _SectionPath:\n nodes: List[BaseNode]\n score: float = 0\n\n def add_node(self, node: BaseNode, section_names: List[str]):\n name = section_names[len(self.nodes) - 1]\n self.score += str_comparison(name, node.title)\n\n\nclass TreeExtractor(Extractor):\n def __init__(self, doc_handler: TreeFileHandler):\n super().__init__(doc_handler)\n self.doc_handler = doc_handler\n\n def get_closest_section(self, titles: List[str]) -> BaseNode:\n \"\"\"Get the closest section given a list of section titles.\n To reach the closest section all possible paths will be explored, the best path\n is the one with the highest sum of title comparison scores.\n\n Args:\n titles (List[str]): list of section titles\n\n Returns:\n BaseNode: closest section\n \"\"\"\n\n initial_path = _SectionPath(nodes=[self.doc_handler.root], score=0)\n paths = deque([initial_path])\n valid_paths = []\n\n while paths:\n path = paths.popleft()\n for node in path.nodes:\n new_path = _SectionPath(nodes=path.nodes[:], score=path.score)\n new_path.add_node(node, titles)\n\n if len(new_path.nodes) == len(titles) + 1:\n valid_paths.append(new_path)\n else:\n paths.append(new_path)\n\n best_path = max(valid_paths, key=lambda x: x.score)\n return best_path\n"
] | [
[
"numpy.max",
"pandas.DataFrame",
"numpy.argmax"
]
] |
ccyjw8860/deep-text-recognition-benchmark | [
"96f526e6d58e2d0685a5e062f472a3cb7310b8be"
] | [
"train_original.py"
] | [
"import os\nimport sys\nimport time\nimport random\nimport string\nimport argparse\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport torch.optim as optim\nimport torch.utils.data\nimport numpy as np\n\nfrom utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager\nfrom dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset\nfrom model import Model\nfrom test import validation\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef train(opt):\n \"\"\" dataset preparation \"\"\"\n if not opt.data_filtering_off:\n print('Filtering the images containing characters which are not in opt.character')\n print('Filtering the images whose label is longer than opt.batch_max_length')\n # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130\n\n opt.select_data = opt.select_data.split('-')\n opt.batch_ratio = opt.batch_ratio.split('-')\n train_dataset = Batch_Balanced_Dataset(opt)\n\n log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')\n AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)\n valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)\n valid_loader = torch.utils.data.DataLoader(\n valid_dataset, batch_size=opt.batch_size,\n shuffle=True, # 'True' to check training progress with validation function.\n num_workers=int(opt.workers),\n collate_fn=AlignCollate_valid, pin_memory=True)\n log.write(valid_dataset_log)\n print('-' * 80)\n log.write('-' * 80 + '\\n')\n log.close()\n\n \"\"\" model configuration \"\"\"\n if 'CTC' in opt.Prediction:\n if opt.baiduCTC:\n converter = CTCLabelConverterForBaiduWarpctc(opt.character)\n else:\n converter = CTCLabelConverter(opt.character)\n else:\n converter = AttnLabelConverter(opt.character)\n opt.num_class = len(converter.character)\n\n if opt.rgb:\n opt.input_channel = 3\n model = Model(opt)\n print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,\n opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,\n opt.SequenceModeling, opt.Prediction)\n\n # weight initialization\n for name, param in model.named_parameters():\n if 'localization_fc2' in name:\n print(f'Skip {name} as it is already initialized')\n continue\n try:\n if 'bias' in name:\n init.constant_(param, 0.0)\n elif 'weight' in name:\n init.kaiming_normal_(param)\n except Exception as e: # for batchnorm.\n if 'weight' in name:\n param.data.fill_(1)\n continue\n\n # data parallel for multi-GPU\n model = torch.nn.DataParallel(model).to(device)\n model.train()\n if opt.saved_model != '':\n print(f'loading pretrained model from {opt.saved_model}')\n if opt.FT:\n model.load_state_dict(torch.load(opt.saved_model), strict=False)\n else:\n model.load_state_dict(torch.load(opt.saved_model))\n print(\"Model:\")\n print(model)\n\n \"\"\" setup loss \"\"\"\n if 'CTC' in opt.Prediction:\n if opt.baiduCTC:\n # need to install warpctc. 
see our guideline.\n from warpctc_pytorch import CTCLoss\n criterion = CTCLoss()\n else:\n criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)\n else:\n criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device) # ignore [GO] token = ignore index 0\n # loss averager\n loss_avg = Averager()\n\n # filter that only require gradient decent\n filtered_parameters = []\n params_num = []\n for p in filter(lambda p: p.requires_grad, model.parameters()):\n filtered_parameters.append(p)\n params_num.append(np.prod(p.size()))\n print('Trainable params num : ', sum(params_num))\n # [print(name, p.numel()) for name, p in filter(lambda p: p[1].requires_grad, model.named_parameters())]\n\n # setup optimizer\n if opt.adam:\n optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))\n else:\n optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps)\n print(\"Optimizer:\")\n print(optimizer)\n\n \"\"\" final options \"\"\"\n # print(opt)\n with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:\n opt_log = '------------ Options -------------\\n'\n args = vars(opt)\n for k, v in args.items():\n opt_log += f'{str(k)}: {str(v)}\\n'\n opt_log += '---------------------------------------\\n'\n print(opt_log)\n opt_file.write(opt_log)\n\n \"\"\" start training \"\"\"\n start_iter = 0\n if opt.saved_model != '':\n try:\n start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])\n print(f'continue to train, start_iter: {start_iter}')\n except:\n pass\n\n start_time = time.time()\n best_accuracy = -1\n best_norm_ED = -1\n iteration = start_iter\n\n while (True):\n # train part\n image_tensors, labels = train_dataset.get_batch()\n image = image_tensors.to(device)\n text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)\n batch_size = image.size(0)\n\n if 'CTC' in opt.Prediction:\n preds = model(image, text)\n preds_size = torch.IntTensor([preds.size(1)] * batch_size)\n if opt.baiduCTC:\n preds = preds.permute(1, 0, 2) # to use CTCLoss format\n cost = criterion(preds, text, preds_size, length) / batch_size\n else:\n preds = preds.log_softmax(2).permute(1, 0, 2)\n cost = criterion(preds, text, preds_size, length)\n\n else:\n preds = model(image, text[:, :-1]) # align with Attention.forward\n target = text[:, 1:] # without [GO] Symbol\n cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))\n\n model.zero_grad()\n cost.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip) # gradient clipping with 5 (Default)\n optimizer.step()\n\n loss_avg.add(cost)\n\n # validation part\n if (\n iteration + 1) % opt.valInterval == 0 or iteration == 0: # To see training progress, we also conduct validation when 'iteration == 0'\n elapsed_time = time.time() - start_time\n # for log\n with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:\n model.eval()\n with torch.no_grad():\n valid_loss, current_accuracy, current_norm_ED, preds, confidence_score, labels, infer_time, length_of_data = validation(\n model, criterion, valid_loader, converter, opt)\n model.train()\n\n # training loss and validation loss\n loss_log = f'[{iteration + 1}/{opt.num_iter}] Train loss: {loss_avg.val():0.5f}, Valid loss: {valid_loss:0.5f}, Elapsed_time: {elapsed_time:0.5f}'\n loss_avg.reset()\n\n current_model_log = f'{\"Current_accuracy\":17s}: {current_accuracy:0.3f}, {\"Current_norm_ED\":17s}: {current_norm_ED:0.2f}'\n\n # keep best accuracy model (on valid dataset)\n if current_accuracy > 
best_accuracy:\n best_accuracy = current_accuracy\n torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_accuracy.pth')\n if current_norm_ED > best_norm_ED:\n best_norm_ED = current_norm_ED\n torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_norm_ED.pth')\n best_model_log = f'{\"Best_accuracy\":17s}: {best_accuracy:0.3f}, {\"Best_norm_ED\":17s}: {best_norm_ED:0.2f}'\n\n loss_model_log = f'{loss_log}\\n{current_model_log}\\n{best_model_log}'\n print(loss_model_log)\n log.write(loss_model_log + '\\n')\n\n # show some predicted results\n dashed_line = '-' * 80\n head = f'{\"Ground Truth\":25s} | {\"Prediction\":25s} | Confidence Score & T/F'\n predicted_result_log = f'{dashed_line}\\n{head}\\n{dashed_line}\\n'\n for gt, pred, confidence in zip(labels[:5], preds[:5], confidence_score[:5]):\n if 'Attn' in opt.Prediction:\n gt = gt[:gt.find('[s]')]\n pred = pred[:pred.find('[s]')]\n\n predicted_result_log += f'{gt:25s} | {pred:25s} | {confidence:0.4f}\\t{str(pred == gt)}\\n'\n predicted_result_log += f'{dashed_line}'\n print(predicted_result_log)\n log.write(predicted_result_log + '\\n')\n\n # save model per 1e+5 iter.\n if (iteration + 1) % 1e+5 == 0:\n torch.save(\n model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration + 1}.pth')\n\n if (iteration + 1) == opt.num_iter:\n print('end the training')\n sys.exit()\n iteration += 1\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp_name', help='Where to store logs and models')\n parser.add_argument('--train_data', required=True, help='path to training dataset', default=\"D:/data/data_lmdb_release/training\")\n parser.add_argument('--valid_data', required=True, help='path to validation dataset', default=\"D:/data/data_lmdb_release/validation\")\n parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')\n parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)\n parser.add_argument('--batch_size', type=int, default=192, help='input batch size')\n parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')\n parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation')\n parser.add_argument('--saved_model', default='', help=\"path to model to continue training\")\n parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')\n parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is Adadelta)')\n parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')\n parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')\n parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')\n parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')\n parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. 
default=5')\n parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode')\n \"\"\" Data processing \"\"\"\n parser.add_argument('--select_data', type=str, default='MJ-ST',\n help='select training data (default is MJ-ST, which means MJ and ST used as training data)')\n parser.add_argument('--batch_ratio', type=str, default='0.5-0.5',\n help='assign ratio for each selected data in the batch')\n parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',\n help='total data usage ratio, this ratio is multiplied to total number of data.')\n parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')\n parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')\n parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')\n parser.add_argument('--rgb', action='store_true', help='use rgb input')\n parser.add_argument('--character', type=str,\n default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')\n parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')\n parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')\n parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')\n \"\"\" Model Architecture \"\"\"\n parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')\n parser.add_argument('--FeatureExtraction', type=str, required=True,\n help='FeatureExtraction stage. VGG|RCNN|ResNet')\n parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')\n parser.add_argument('--Prediction', type=str, required=True, help='Prediction stage. 
CTC|Attn')\n parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')\n parser.add_argument('--input_channel', type=int, default=1,\n help='the number of input channel of Feature extractor')\n parser.add_argument('--output_channel', type=int, default=512,\n help='the number of output channel of Feature extractor')\n parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')\n\n opt = parser.parse_args()\n\n if not opt.exp_name:\n opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-{opt.Prediction}'\n opt.exp_name += f'-Seed{opt.manualSeed}'\n # print(opt.exp_name)\n\n os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)\n\n \"\"\" vocab / character number configuration \"\"\"\n if opt.sensitive:\n # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).\n\n \"\"\" Seed and GPU setting \"\"\"\n # print(\"Random Seed: \", opt.manualSeed)\n random.seed(opt.manualSeed)\n np.random.seed(opt.manualSeed)\n torch.manual_seed(opt.manualSeed)\n torch.cuda.manual_seed(opt.manualSeed)\n\n cudnn.benchmark = True\n cudnn.deterministic = True\n opt.num_gpu = torch.cuda.device_count()\n # print('device count', opt.num_gpu)\n if opt.num_gpu > 1:\n print('------ Use multi-GPU setting ------')\n print('if you stuck too long time with multi-GPU setting, try to set --workers 0')\n # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1\n opt.workers = opt.workers * opt.num_gpu\n opt.batch_size = opt.batch_size * opt.num_gpu\n\n \"\"\" previous version\n print('To equlize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)\n opt.batch_size = opt.batch_size * opt.num_gpu\n print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')\n If you dont care about it, just commnet out these line.)\n opt.num_iter = int(opt.num_iter / opt.num_gpu)\n \"\"\"\n\n train(opt)"
] | [
[
"torch.nn.init.kaiming_normal_",
"torch.optim.Adadelta",
"torch.nn.init.constant_",
"torch.load",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.no_grad",
"numpy.random.seed",
"torch.cuda.device_count",
"torch.nn.CrossEntropyLoss",
"torch.optim.Adam",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.nn.CTCLoss"
]
] |
Seondong/talkingdata_kaggle_201608 | [
"b9ddbb343dacbcfdaa4b2732c9ea23bf776c773b"
] | [
"code/xgboost_baseline.py"
] | [
"__author__ = 'ZFTurbo: https://kaggle.com/zfturbo'\n\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\nimport xgboost as xgb\nimport random\nimport zipfile\nimport time\nimport shutil\nfrom sklearn.metrics import log_loss\n\nrandom.seed(2016)\n\ndef run_xgb(train, test, features, target, random_state=0):\n eta = 0.1\n max_depth = 3\n subsample = 0.7\n colsample_bytree = 0.7\n start_time = time.time()\n\n print('XGBoost params. ETA: {}, MAX_DEPTH: {}, SUBSAMPLE: {}, COLSAMPLE_BY_TREE: {}'.format(eta, max_depth, subsample, colsample_bytree))\n params = {\n \"objective\": \"multi:softprob\",\n \"num_class\": 12,\n \"booster\" : \"gbtree\",\n \"eval_metric\": \"mlogloss\",\n \"eta\": eta,\n \"max_depth\": max_depth,\n \"subsample\": subsample,\n \"colsample_bytree\": colsample_bytree,\n \"silent\": 1,\n \"seed\": random_state,\n }\n num_boost_round = 500\n early_stopping_rounds = 50\n test_size = 0.3\n\n X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)\n print('Length train:', len(X_train.index))\n print('Length valid:', len(X_valid.index))\n y_train = X_train[target]\n y_valid = X_valid[target]\n dtrain = xgb.DMatrix(X_train[features], y_train)\n dvalid = xgb.DMatrix(X_valid[features], y_valid)\n\n watchlist = [(dtrain, 'train'), (dvalid, 'eval')]\n gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=True)\n\n print(\"Validating...\")\n check = gbm.predict(xgb.DMatrix(X_valid[features]), ntree_limit=gbm.best_iteration)\n score = log_loss(y_valid.tolist(), check)\n\n print(\"Predict test set...\")\n test_prediction = gbm.predict(xgb.DMatrix(test[features]), ntree_limit=gbm.best_iteration)\n\n print('Training time: {} minutes'.format(round((time.time() - start_time)/60, 2)))\n return test_prediction.tolist(), score\n\n\ndef create_submission(score, test, prediction):\n # Make Submission\n now = datetime.datetime.now()\n sub_file = 'submission_' + str(score) + '_' + str(now.strftime(\"%Y-%m-%d-%H-%M\")) + '.csv'\n print('Writing submission: ', sub_file)\n f = open(sub_file, 'w')\n f.write('device_id,F23-,F24-26,F27-28,F29-32,F33-42,F43+,M22-,M23-26,M27-28,M29-31,M32-38,M39+\\n')\n total = 0\n test_val = test['device_id'].values\n for i in range(len(test_val)):\n str1 = str(test_val[i])\n for j in range(12):\n str1 += ',' + str(prediction[i][j])\n str1 += '\\n'\n total += 1\n f.write(str1)\n f.close()\n\n\ndef map_column(table, f):\n labels = sorted(table[f].unique())\n mappings = dict()\n for i in range(len(labels)):\n mappings[labels[i]] = i\n table = table.replace({f: mappings})\n return table\n\n\ndef read_train_test():\n # Events\n print('Read events...')\n events = pd.read_csv(\"../input/events.csv\", dtype={'device_id': np.str})\n events['counts'] = events.groupby(['device_id'])['event_id'].transform('count')\n events_small = events[['device_id', 'counts']].drop_duplicates('device_id', keep='first')\n\n # Phone brand\n print('Read brands...')\n pbd = pd.read_csv(\"../input/phone_brand_device_model.csv\", dtype={'device_id': np.str})\n pbd.drop_duplicates('device_id', keep='first', inplace=True)\n pbd = map_column(pbd, 'phone_brand')\n pbd = map_column(pbd, 'device_model')\n\n # Train\n print('Read train...')\n train = pd.read_csv(\"../input/gender_age_train.csv\", dtype={'device_id': np.str})\n train = map_column(train, 'group')\n train = train.drop(['age'], axis=1)\n train = train.drop(['gender'], axis=1)\n train 
= pd.merge(train, pbd, how='left', on='device_id', left_index=True)\n train = pd.merge(train, events_small, how='left', on='device_id', left_index=True)\n train.fillna(-1, inplace=True)\n\n # Test\n print('Read test...')\n test = pd.read_csv(\"../input/gender_age_test.csv\", dtype={'device_id': np.str})\n test = pd.merge(test, pbd, how='left', on='device_id', left_index=True)\n test = pd.merge(test, events_small, how='left', on='device_id', left_index=True)\n test.fillna(-1, inplace=True)\n\n # Features\n features = list(test.columns.values)\n features.remove('device_id')\n\n return train, test, features\n\n\ntrain, test, features = read_train_test()\nprint('Length of train: ', len(train))\nprint('Length of test: ', len(test))\nprint('Features [{}]: {}'.format(len(features), sorted(features)))\ntest_prediction, score = run_xgb(train, test, features, 'group')\nprint(\"LS: {}\".format(round(score, 5)))\ncreate_submission(score, test, test_prediction)"
] | [
[
"pandas.read_csv",
"pandas.merge",
"sklearn.cross_validation.train_test_split"
]
] |
shengyushen/training | [
"0db663b86001dfc359da98c1504a7a3cb8e1f617"
] | [
"mnist/mnist_with_summaries_bf16.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A simple MNIST classifier which displays summaries in TensorBoard.\n\nThis is an unimpressive MNIST model, but it is a good example of using\ntf.name_scope to make a graph legible in the TensorBoard graph explorer, and of\nnaming summary tags so that they are grouped meaningfully in TensorBoard.\n\nIt demonstrates the functionality of every TensorBoard dashboard.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport sys\n\nimport tensorflow as tf\n#from tensorflow.python.lib.io import file_io\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nfrom pgrad import *\nFLAGS = None\n\n\ndef train():\n # Import data\n mnist = input_data.read_data_sets(FLAGS.data_dir,\n fake_data=FLAGS.fake_data)\n\n #sess = tf.InteractiveSession()\n sess = tf.InteractiveSession()\n # Create a multilayer model.\n\n # Input placeholders\n with tf.name_scope('input'):\n x = tf.placeholder(tf.float32, [None, 784], name='x-input')\n y_ = tf.placeholder(tf.int64, [None], name='y-input')\n\n with tf.name_scope('input_reshape'):\n image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])\n tf.summary.image('input', image_shaped_input, 10)\n\n # We can't initialize these variables to 0 - the network will get stuck.\n def weight_variable(shape):\n \"\"\"Create a weight variable with appropriate initialization.\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n def bias_variable(shape):\n \"\"\"Create a bias variable with appropriate initialization.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n def variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(input_tensor=var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(input_tensor=tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(input_tensor=var))\n tf.summary.scalar('min', tf.reduce_min(input_tensor=var))\n tf.summary.histogram('histogram', var)\n\n def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):\n \"\"\"Reusable code for making a simple neural net layer.\n\n It does a matrix multiply, bias add, and then uses ReLU to nonlinearize.\n It also sets up name scoping so that the resultant graph is easy to read,\n and adds a number of summary ops.\n \"\"\"\n # Adding a name scope ensures logical grouping of the layers in the graph.\n with tf.name_scope(layer_name):\n # This Variable will hold the state of the weights for the layer\n with tf.name_scope('weights'):\n weights = weight_variable([input_dim, output_dim])\n variable_summaries(weights)\n with 
tf.name_scope('biases'):\n biases = bias_variable([output_dim])\n variable_summaries(biases)\n with tf.name_scope('Wx_plus_b'):\n # SSY /usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/math_ops.py\n input_tensor = id_bf16cut_fp(input_tensor)\n weights = id_bf16cut_fp(weights)\n preactivate = tf.matmul(input_tensor, weights) + biases\n preactivate=id_bf16cut_bp(preactivate)\n tf.summary.histogram('pre_activations', preactivate)\n activations = act(preactivate, name='activation')\n tf.summary.histogram('activations', activations)\n return activations\n\n hidden1 = nn_layer(x, 784, 500, 'layer1')\n\n with tf.name_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32)\n tf.summary.scalar('dropout_keep_probability', keep_prob)\n #dropped = tf.nn.dropout(hidden1, rate=(1 - keep_prob))\n dropped = tf.nn.dropout(hidden1, keep_prob=keep_prob)\n\n # Do not apply softmax activation yet, see below.\n y = nn_layer(dropped, 500, 10, 'layer2', act=tf.identity)\n\n with tf.name_scope('cross_entropy'):\n # The raw formulation of cross-entropy,\n #\n # tf.reduce_mean(-tf.reduce_sum(y_ * tf.math.log(tf.softmax(y)),\n # reduction_indices=[1]))\n #\n # can be numerically unstable.\n #\n # So here we use tf.losses.sparse_softmax_cross_entropy on the\n # raw logit outputs of the nn_layer above, and then average across\n # the batch.\n with tf.name_scope('total'):\n cross_entropy = tf.losses.sparse_softmax_cross_entropy(\n labels=y_, logits=y)\n tf.summary.scalar('cross_entropy', cross_entropy)\n\n with tf.name_scope('train'):\n train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(\n cross_entropy)\n\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(input=y, axis=1), y_)\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(input_tensor=tf.cast(correct_prediction,\n tf.float32))\n tf.summary.scalar('accuracy', accuracy)\n\n # Merge all the summaries and write them out to\n # /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default)\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train',\n sess.graph)\n test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')\n tf.global_variables_initializer().run()\n\n # Train the model, and also write summaries.\n # Every 10th step, measure test-set accuracy, and write test summaries\n # All other steps, run train_step on training data, & add training summaries\n\n def feed_dict(train):\n \"\"\"Make a TensorFlow feed_dict: maps data onto Tensor placeholders.\"\"\"\n if train or FLAGS.fake_data:\n xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)\n k = FLAGS.dropout\n else:\n xs, ys = mnist.test.images, mnist.test.labels\n k = 1.0\n return {x: xs, y_: ys, keep_prob: k}\n\n for i in range(FLAGS.max_steps):\n if i % 10 == 0: # Record summaries and test-set accuracy\n summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))\n test_writer.add_summary(summary, i)\n print('Accuracy at step %s: %s' % (i, acc))\n else: # Record train set summaries, and train\n if i % 100 == 99: # Record execution stats\n run_options = tf.RunOptions(\n trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n summary, _ = sess.run([merged, train_step],\n feed_dict=feed_dict(True),\n options=run_options,\n run_metadata=run_metadata)\n train_writer.add_run_metadata(run_metadata, 'step%03d' % i)\n train_writer.add_summary(summary, i)\n print('Adding run metadata for', i)\n else: # Record a summary\n summary, 
_ = sess.run([merged, train_step], feed_dict=feed_dict(True))\n train_writer.add_summary(summary, i)\n train_writer.close()\n test_writer.close()\n\n\ndef main(_):\n if tf.gfile.Exists(FLAGS.log_dir):\n tf.gfile.DeleteRecursively(FLAGS.log_dir)\n tf.gfile.MakeDirs(FLAGS.log_dir)\n with tf.Graph().as_default():\n train()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--fake_data', nargs='?', const=True, type=bool,\n default=False,\n help='If true, uses fake data for unit testing.')\n parser.add_argument('--max_steps', type=int, default=1000,\n help='Number of steps to run trainer.')\n parser.add_argument('--learning_rate', type=float, default=0.001,\n help='Initial learning rate')\n parser.add_argument('--dropout', type=float, default=0.9,\n help='Keep probability for training dropout.')\n parser.add_argument(\n '--data_dir',\n type=str,\n default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),\n 'tensorflow/mnist/input_data'),\n help='Directory for storing input data')\n parser.add_argument(\n '--log_dir',\n type=str,\n default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),\n 'tensorflow/mnist/logs/mnist_with_summaries'),\n help='Summaries log directory')\n FLAGS, unparsed = parser.parse_known_args()\n # I am using tf 1.18 so I dont need compat v1\n #tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.reduce_max",
"tensorflow.reshape",
"tensorflow.summary.image",
"tensorflow.matmul",
"tensorflow.InteractiveSession",
"tensorflow.name_scope",
"tensorflow.Variable",
"tensorflow.summary.FileWriter",
"tensorflow.nn.dropout",
"tensorflow.summary.histogram",
"tensorflow.global_variables_initializer",
"tensorflow.Graph",
"tensorflow.losses.sparse_softmax_cross_entropy",
"tensorflow.gfile.MakeDirs",
"tensorflow.reduce_min",
"tensorflow.constant",
"tensorflow.app.run",
"tensorflow.cast",
"tensorflow.RunOptions",
"tensorflow.placeholder",
"tensorflow.summary.merge_all",
"tensorflow.truncated_normal",
"tensorflow.train.AdamOptimizer",
"tensorflow.reduce_mean",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.RunMetadata",
"tensorflow.square",
"tensorflow.argmax",
"tensorflow.gfile.Exists",
"tensorflow.gfile.DeleteRecursively"
]
] |
laket/ape-x | [
"8ccb4192206d9529b5105e9fffd3cff143f48864"
] | [
"replay_buffer_actor.py"
] | [
"'''\nCopyright (c) 2018 Uber Technologies, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\nimport tensorflow as tf\nimport numpy as np\n\nimport models\nfrom ops.segment_tree import ShortTermBuffer\n\nfrom gym_tensorflow.wrappers.base import BaseWrapper\n\ndef make_masked_frame(frames, dones, data_format):\n \"\"\"doneなframesは0、それ以外はもとの値を持つTensor群を返す\n\n :param list[tf.Tensor] frames: ここのTensorはNCHWっぽい (data_formatに従う)\n :param tuple[tf.Tensor] dones: ここのTensorはframes[i]の0次元目の長さと一致するbool\n :param data_format:\n :return:\n \"\"\"\n frames = list(frames[:])\n mask = None\n # donesを反転して次元を後ろに4つつける (4,) => (4,1,1,1)\n not_dones = [tf.cast(tf.logical_not(d), frames[0].dtype) if d is not None else None for d in dones]\n not_dones = [tf.expand_dims(d, axis=-1) if d is not None else None for d in not_dones]\n not_dones = [tf.expand_dims(d, axis=-1) if d is not None else None for d in not_dones]\n not_dones = [tf.expand_dims(d, axis=-1) if d is not None else None for d in not_dones]\n for i in np.flip(np.arange(len(frames) - 1), 0):\n if mask is None:\n mask = not_dones[i]\n else:\n mask = mask * not_dones[i]\n frames[i] = tf.image.convert_image_dtype(frames[i] * mask, tf.float32)\n frames[-1] = tf.image.convert_image_dtype(frames[-1], tf.float32)\n if data_format == 'NHWC':\n return tf.concat(frames, axis=-1, name='concat_masked_frames')\n elif data_format == 'NCHW':\n return tf.concat(frames, axis=-3, name='concat_masked_frames')\n else:\n raise NotImplementedError()\n\n\nclass ReplayBufferWrapper(BaseWrapper):\n \"\"\"行動をBufferに蓄積する環境\n\n BaseWrapperは環境用のクラス\n\n 利用例 (Prioritizedはこのクラスを継承している)\n PrioritizedReplayBufferWrapper(envs[actor_num], actor_num, actor_fifo, framestack, data_format, multi_step_n=multi_step_n)\n\n \"\"\"\n\n def __init__(self, env, actor_num, queue, num_stacked_frames, data_format):\n \"\"\"\n\n :param gym_tensorflow.atari.tf_atari.AtariEnv env: step等の関数を持つ環境 (AtariEnvとかくる)\n :param actor_num:\n :param tf.FIFOQueue queue:\n :param int num_stacked_frames: おそらく状態として何フレームを一括として扱うか\n :param data_format:\n \"\"\"\n super(ReplayBufferWrapper, self).__init__(env)\n self.queue = queue\n self.actor_num = actor_num\n self.num_stacked_frames = num_stacked_frames\n self.data_format = data_format\n\n with tf.device('/cpu:0'):\n if data_format == 'NCHW':\n obs_space = env.observation_space[0], env.observation_space[-1], env.observation_space[1], env.observation_space[2]\n else:\n obs_space = env.observation_space\n # 常にnum_stacked_framesをトラックする\n self.buffer = ShortTermBuffer(shapes=[obs_space, (env.batch_size,)], dtypes=[tf.uint8, tf.bool],\n framestack=num_stacked_frames, multi_step=0)\n\n @property\n def observation_space(self):\n return self.env.observation_space[:-1] + (self.env.observation_space[-1] * self.num_stacked_frames, )\n\n def observation(self, indices=None, reset=False, name=None):\n \"\"\"現在のstateを返す。ただし、num_stacked_frames分拡張されたobservationを返す\n\n :param indices: batchの中で一部のものをtrackしている場合かな? 
(どこで使っている?)\n :param reset: 未使用\n :param name: 未使用\n :return:\n \"\"\"\n assert indices is None\n obs = self.env.observation(indices)\n if self.data_format == 'NCHW':\n obs = tf.transpose(obs, (0, 3, 1, 2))\n\n with tf.device('/cpu:0'):\n _, recent_obs_done = self.buffer.encode_history()\n\n observations, dones=zip( * recent_obs_done[1 - self.num_stacked_frames:])\n observations += (obs,)\n dones += (None,)\n\n return make_masked_frame(observations, dones, self.data_format)\n\n def step(self, action, indices=None, name=None):\n assert indices is None\n sliced_act_obs = self.env.observation(indices)\n if self.data_format == 'NCHW':\n sliced_act_obs = tf.transpose(sliced_act_obs, (0, 3, 1, 2))\n\n sliced_act_obs = tf.image.convert_image_dtype(sliced_act_obs, tf.uint8)\n assert sliced_act_obs.dtype == tf.uint8\n\n with tf.device('/cpu:0'):\n _, recent_obs_done = self.buffer.encode_history()\n\n observations, dones=zip( * recent_obs_done[1 - self.num_stacked_frames:])\n observations += (sliced_act_obs,)\n dones += (None,)\n\n # 直近の4フレームをstateとしてまとめる\n obs = make_masked_frame(observations, dones, self.data_format)\n with tf.control_dependencies([sliced_act_obs]):\n # 1stepすすめる\n rew, done = self.env.step(action=action, indices=indices, name=name)\n # (入力画像, 完了済み)のペアをShortTermBufferに入れる\n # 遷移後のstateは次のstepなりobservationなりで取る思想っぽい\n update_recent_history = self.buffer.enqueue([sliced_act_obs, done])\n\n # 観測列をReplayBufferに入れる\n enqueue_op = self.queue.enqueue([obs, sliced_act_obs, rew, done, action, self.actor_num])\n\n with tf.control_dependencies([update_recent_history[0].op, enqueue_op]):\n return tf.identity(rew), tf.identity(done)\n\n\nclass PrioritizedReplayBufferWrapper(ReplayBufferWrapper):\n \"\"\"ReplayBuffer (Ape-X 所属)\n\n 呼び出し例\n PrioritizedReplayBufferWrapper(envs[actor_num], actor_num, actor_fifo, framestack, data_format, multi_step_n=multi_step_n)\n\n \"\"\"\n\n def __init__(self, *args, multi_step_n=None, **kwargs):\n super(PrioritizedReplayBufferWrapper, self).__init__(*args, **kwargs)\n self.transition_buffer = None\n self.multi_step_n = multi_step_n\n\n @classmethod\n def get_buffer_dtypes(cls, multi_step_n, framestack):\n return [tf.uint8, tf.float32, tf.bool, tf.int32, tf.float32, tf.float32] * (multi_step_n + framestack)\n\n @classmethod\n def get_buffer_shapes(cls, env, multi_step_n, num_stacked_frames, data_format):\n b = (env.batch_size,)\n if data_format == 'NCHW':\n obs_space = env.observation_space[-1], env.observation_space[1], env.observation_space[2]\n else:\n obs_space = env.observation_space[1:]\n shapes = [\n obs_space, # Image\n (), # Reward\n (), # Done\n (), # Action\n (env.action_space,), # Q Values\n (), # Selected Q Value\n ]\n shapes = [b + s for s in shapes]\n return shapes * (multi_step_n + num_stacked_frames)\n\n def step(self, action, indices=None, name=None, q_values=None, q_t_selected=None):\n \"\"\"環境を1stepすすめる\n\n 呼び出し例\n env.step(output_actions, q_values=q_values, q_t_selected=q_t_selected)\n\n\n :param tf.Tensor action: 選んだアクション [batch_size]\n :param indices:\n :param name:\n :param tf.Tensor q_values: 各アクションのQ(s,a) [batch_size, num_actions]\n :param tf.Tensor q_t_selected: 選んだアクションの評価値 [batch_size]\n :return:\n \"\"\"\n\n assert indices is None\n assert q_values is not None\n assert q_t_selected is not None\n batch_size = self.env.batch_size\n # NHWCの画像がとれる\n sliced_act_obs = self.env.observation(indices)\n if self.data_format == 'NCHW':\n sliced_act_obs = tf.transpose(sliced_act_obs, (0, 3, 1, 2))\n\n sliced_act_obs = 
tf.image.convert_image_dtype(sliced_act_obs, tf.uint8)\n assert sliced_act_obs.dtype == tf.uint8\n\n with tf.device('/cpu:0'):\n _, recent_obs_done = self.buffer.encode_history()\n\n # 最後のnum_stacked_frames-1分だけrecent_obs_doneからとってくる\n observations, dones=zip( * recent_obs_done[1 - self.num_stacked_frames:])\n # 最新の観測を足す Invadorだと(4,1,84,84)が4つのlist\n observations += (sliced_act_obs,)\n # (4,)のboolが4つのlist\n dones += (None,)\n\n obs = make_masked_frame(observations, dones, self.data_format)\n with tf.control_dependencies([sliced_act_obs]):\n rew, done = self.env.step(action=action, indices=indices, name=name)\n update_recent_history = self.buffer.enqueue([sliced_act_obs, done])\n\n # (action前状態, 報酬, 終わったかどうか, 選択したアクション, Q[batch_size,num_action], 選んだアクションの価値[batch_size])\n current_frame = sliced_act_obs, rew, done, action, q_values, q_t_selected\n if self.transition_buffer is None:\n with tf.control_dependencies(None):\n with tf.device('/cpu:0'):\n self.transition_buffer = ShortTermBuffer(shapes=[v.get_shape() for v in current_frame], dtypes=[v.dtype for v in current_frame], framestack=self.num_stacked_frames, multi_step=self.multi_step_n)\n\n # ShortTermBufferに現在の状態を足す\n # historyにはnum_stacked_frame+multi-step分のcurrent_frame列が入る\n is_valid, history = self.transition_buffer.enqueue(current_frame)\n\n history = [e for t in history for e in t]\n replay_queue_shapes = [(None,) + tuple(a.get_shape()[1:]) for a in history]\n\n enqueue_op = tf.cond(is_valid, lambda: self.queue.enqueue(history), tf.no_op)\n\n with tf.control_dependencies([enqueue_op, update_recent_history[0].op]):\n return tf.identity(rew), tf.identity(done)\n"
] | [
[
"tensorflow.logical_not",
"tensorflow.device",
"tensorflow.expand_dims",
"tensorflow.image.convert_image_dtype",
"tensorflow.concat",
"tensorflow.identity",
"tensorflow.transpose",
"tensorflow.control_dependencies"
]
] |
wahyutirta/CNN-numpy | [
"d66e10a53304a0c72c40f278486866493f573d5e"
] | [
"main-1.1.py"
] | [
"from PyQt5.QtWidgets import *\nimport sys,pickle\nimport os\n\nfrom PyQt5 import uic, QtWidgets ,QtCore, QtGui\nfrom PyQt5 import QtWidgets, uic\nfrom PyQt5.QtCore import QDir, Qt, QSortFilterProxyModel\nfrom PyQt5.QtWidgets import QDialog ,QApplication, QFileDialog, QWidget, QTextEdit, QLabel\nfrom PyQt5.uic import loadUi\nfrom PyQt5.QtGui import QStandardItemModel, QStandardItem\nfrom PyQt5.QtGui import QImage\nimport cv2, imutils\nfrom einops import rearrange, reduce, repeat\nfrom lenet5 import *\nimport numpy as np\n\nimport matplotlib as plt\nplt.use('Qt5Agg')\n#matplotlib.use('Qt5Agg')\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport matplotlib.ticker as ticker\n\n\nmain_path = os.path.dirname(os.path.abspath(__file__)) #file path main.py\nwork_path = os.path.split(main_path) #path working folder (whole file project)\nui_folder = os.path.join(main_path,\"ui/\") #ui_folder path\n\n\nclass MplCanvas(FigureCanvas):\n def __init__(self, parent=None, width=4, height=7, dpi=100):\n self.fig = Figure(figsize=(width, height), dpi=dpi)\n \n \n super(MplCanvas, self).__init__(self.fig)\n #self.fig.tight_layout()\n \n\t\t\n\n\nclass error_window(QMainWindow):\n def __init__(self):\n super(error_window, self).__init__()\n\n\nclass App(QMainWindow):\n def __init__(self):\n super(App, self).__init__()\n\n self.ui = uic.loadUi(os.path.join(ui_folder,\"main2.ui\"), self)\n \n self.filePath = None\n self.methods = [\"adam\", \"rmsprop\"]\n self.learningRate = [\"0.001\", \"0.0001\"]\n self.batch = [\"32\"]\n self.epochs = [\"101\", \"151\", \"201\"]\n \n self.output = None\n \n self.optimizerCombo.addItems(self.methods)\n self.learningRateCombo.addItems(self.learningRate)\n self.epochsCombo.addItems(self.epochs)\n self.batchCombo.addItems(self.batch)\n \n self.lenet = None\n if self.lenet == None:\n self.modelLabel.setText(\"No Model\")\n \n self.openImageBtn.clicked.connect(self.browseImage)\n self.loadModelBtn.clicked.connect(self.browseModel)\n self.recogImageBtn.clicked.connect(self.predictImage)\n imagePath = \"data_jepun\"\n self.data = Data(main_path, imagePath)\n self.label = self.data.loadLabel()\n \n self.optimizerCombo.currentIndexChanged.connect(self.resetModel)\n self.learningRateCombo.currentIndexChanged.connect(self.resetModel)\n self.epochsCombo.currentIndexChanged.connect(self.resetModel)\n self.batchCombo.currentIndexChanged.connect(self.resetModel)\n \n \n def resetModel(self):\n self.lenet = None\n \n if self.lenet == None:\n self.output = self.modelLabel.setText(\"No Model\")\n print(\"model null\")\n \n\n def browseImage(self):\n self.filePath = QFileDialog.getOpenFileName(filter=\"Image (*.*)\")[0]\n _, self.fname = os.path.split(self.filePath)\n self.textFname.setText(self.fname)\n print(self.filePath) \n self.image = cv2.imread(self.filePath)\n self.setPhoto(self.image)\n \n #clear canvas\n self.canvas1 = MplCanvas(self, width=4, height=6, dpi=100)\n self.ui.gridLayout_4.addWidget(self.canvas1, 1, 6, 1, 1)\n self.canvas1.fig.clf()\n \n self.canvas2 = MplCanvas(self, width=4, height=6, dpi=100)\n self.ui.gridLayout_4.addWidget(self.canvas2, 1, 7, 1, 1)\n self.canvas2.fig.clf()\n\n\n \n def setPhoto(self,image):\n \"\"\" This function will take image input and resize it \n\t\t\tonly for display purpose and convert it to QImage\n\t\t\tto set at the label.\n\t\t\"\"\"\n self.tmp = image\n image = imutils.resize(image,width=300)\n frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = QImage(frame, 
frame.shape[1],frame.shape[0],frame.strides[0],QImage.Format_RGB888)\n self.imageSpace.setPixmap(QtGui.QPixmap.fromImage(image))\n \n \n def browseModel(self):\n \n method = self.optimizerCombo.currentText()\n learningRate = self.learningRateCombo.currentText()\n epochs = self.epochsCombo.currentText()\n batch = self.batchCombo.currentText()\n print(method, learningRate, epochs, batch)\n self.lenet = LENET5( method = method, epochs = epochs, batch = batch, learningRate = learningRate) \n \n self.lenet.load_parameters(mainPath=main_path,epochs=epochs,method=method, batch=batch, learningRate=learningRate)\n if self.lenet != None:\n self.output = self.modelLabel.setText(\"Model Loaded\")\n \n def predictImage(self):\n self.output = self.lenet.one_image(self.lenet.layers, self.filePath)\n\n indeks = np.argmax(self.output)\n\n self.predLabel.setText(self.label[indeks])\n pribability = str(self.output[0,indeks] * 100)\n self.probLabel.setText(str(pribability + \"%\"))\n \n features1 = self.lenet.displayFeature(self.lenet.layers, self.filePath, 1)\n features1 = features1.astype(np.uint8)\n self.features1 = features1\n \n features2 = self.lenet.displayFeature(self.lenet.layers, self.filePath, 2)\n features2 = features2.astype(np.uint8)\n self.canvasManager(features1,features2)\n \n def canvasManager(self,features1, features2):\n \n self.canvas1 = MplCanvas(self, width=4, height=6, dpi=100)\n self.ui.gridLayout_4.addWidget(self.canvas1, 1, 6, 1, 1)\n App.plot(self.canvas1,features1)\n \n self.canvas2 = MplCanvas(self, width=4, height=6, dpi=100)\n self.ui.gridLayout_4.addWidget(self.canvas2, 1, 7, 1, 1)\n App.plot(self.canvas2,features2)\n\n \"\"\"\n rows = 3\n columns = 2\n counter = 1\n print(features.shape)\n for feature in features:\n \n print(feature)\n title = str(\"feature \" + str(counter))\n self.canvas.axes = self.canvas.fig.add_subplot(rows, columns, counter)\n \n \n self.canvas.axes.imshow(feature)\n self.canvas.axes.axis(\"off\")\n self.canvas.axes.set_title(title)\n counter += 1\n \n self.canvas.draw()\n \"\"\"\n @staticmethod\n def plot(canvas,features):\n\n rows = 3\n columns = 2\n counter = 1\n print(features.shape)\n for feature in features:\n \n print(feature)\n title = str(\"feature \" + str(counter))\n canvas.axes = canvas.fig.add_subplot(rows, columns, counter)\n \n \n canvas.axes.imshow(feature)\n canvas.axes.axis(\"off\")\n canvas.axes.set_title(title)\n counter += 1\n \n canvas.draw()\n\n\napp = QtWidgets.QApplication(sys.argv)\nwindow = App()\nwidget = QtWidgets.QStackedWidget()\nwidget.addWidget(window)\nwidget.setFixedWidth(1070)\nwidget.setFixedHeight(660)\nwidget.show()\napp.exec_()\n#sys.exit( app.exec_() )\n\n\n"
] | [
[
"matplotlib.use",
"matplotlib.figure.Figure",
"numpy.argmax"
]
] |
huskermiao/MaizeLeafCounting | [
"68d3d8e8bebf2dc74f2aa79a3fc62aca67de1dbb"
] | [
"CountingByDetection_FasterRCNNs/cocoeval.py"
] | [
"__author__ = 'tsungyi'\n\nimport numpy as np\nimport datetime\nimport time\nfrom collections import defaultdict\nimport mask as maskUtils\nimport copy\n\n\nclass COCOeval:\n # Interface for evaluating detection on the Microsoft COCO dataset.\n #\n # The usage for CocoEval is as follows:\n # cocoGt=..., cocoDt=... # load dataset and results\n # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n if not cocoGt is None:\n self.params.imgIds = sorted(cocoGt.getImgIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['image_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['image_id'], dt['category_id']].append(dt)\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(imgId, catId): computeIoU(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n\n evaluateImg = self.evaluateImg\n maxDet = p.maxDets[-1]\n self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n\n def computeOks(self, imgId, catId):\n p = self.params\n # dimention here should be Nxm\n gts = self._gts[imgId, catId]\n dts = self._dts[imgId, catId]\n inds = np.argsort([-d['score'] for d in dts], kind='mergesort')\n dts = [dts[i] for i in inds]\n if len(dts) > p.maxDets[-1]:\n dts = dts[0:p.maxDets[-1]]\n # if len(gts) == 0 and len(dts) == 0:\n if len(gts) == 0 or len(dts) == 0:\n return []\n ious = np.zeros((len(dts), len(gts)))\n sigmas = p.kpt_oks_sigmas\n vars = (sigmas * 2)**2\n k = len(sigmas)\n # compute oks between each detection and ground truth object\n for j, gt in enumerate(gts):\n # create bounds for ignore regions(double the gt bbox)\n g = np.array(gt['keypoints'])\n xg = g[0::3]; yg = g[1::3]; vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n bb = gt['bbox']\n x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2\n y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2\n for i, dt in enumerate(dts):\n d = np.array(dt['keypoints'])\n xd = d[0::3]; yd = d[1::3]\n if k1>0:\n # measure the per-keypoint distance if keypoints visible\n dx = xd - xg\n dy = yd - yg\n else:\n # measure minimum distance to keypoints in (x0,y0) & (x1,y1)\n z = np.zeros((k))\n dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)\n dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)\n e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2\n if k1 > 0:\n e=e[vg > 0]\n ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]\n return ious\n\n def evaluateImg(self, imgId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId 
in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load computed ious\n ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = -np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 
in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', 
maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def _summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()\n\nclass Params:\n '''\n Params for coco evaluation api\n '''\n def setDetParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)\n self.maxDets = [1, 10, 100]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'small', 'medium', 'large']\n self.useCats = 1\n\n def setKpParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)\n self.maxDets = [20]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'medium', 'large']\n self.useCats = 1\n self.kpt_oks_sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0\n\n def __init__(self, iouType='segm'):\n if iouType == 'segm' or iouType == 'bbox':\n self.setDetParams()\n elif iouType == 'keypoints':\n self.setKpParams()\n else:\n raise Exception('iouType not supported')\n self.iouType = iouType\n # useSegm is deprecated\n self.useSegm = None"
] | [
[
"numpy.ones",
"numpy.spacing",
"numpy.cumsum",
"numpy.zeros",
"numpy.searchsorted",
"numpy.argsort",
"numpy.repeat",
"numpy.exp",
"numpy.count_nonzero",
"numpy.logical_not",
"numpy.where",
"numpy.max",
"numpy.array",
"numpy.concatenate",
"numpy.round",
"numpy.unique",
"numpy.mean"
]
] |
yalov4uk/ML-labs | [
"ca944610614c182259783449d9ec6e9135d6aaf1"
] | [
"5/download.py"
] | [
"import os\nimport tarfile\nimport email\nimport re\nimport nltk\nimport urlextract\nimport numpy as np\nimport scipy.io as sio\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom nltk.stem import PorterStemmer\nfrom html import unescape\nfrom email import parser\nfrom email.policy import default\nfrom six.moves import urllib\nfrom collections import Counter\n\nDOWNLOAD_ROOT = \"http://spamassassin.apache.org/old/publiccorpus/\"\nHAM_URL = DOWNLOAD_ROOT + \"20030228_easy_ham.tar.bz2\"\nSPAM_URL = DOWNLOAD_ROOT + \"20030228_spam.tar.bz2\"\nSPAM_PATH = os.path.join(\"datasets\", \"spam\")\n\n\ndef fetch_spam_data(spam_url=SPAM_URL, spam_path=SPAM_PATH):\n if not os.path.isdir(spam_path):\n os.makedirs(spam_path)\n for filename, url in ((\"ham.tar.bz2\", HAM_URL), (\"spam.tar.bz2\", SPAM_URL)):\n path = os.path.join(spam_path, filename)\n if not os.path.isfile(path):\n urllib.request.urlretrieve(url, path)\n tar_bz2_file = tarfile.open(path)\n tar_bz2_file.extractall(path=SPAM_PATH)\n tar_bz2_file.close()\n\n\n# fetch_spam_data()\n\nHAM_DIR = os.path.join(SPAM_PATH, \"easy_ham\")\nSPAM_DIR = os.path.join(SPAM_PATH, \"spam\")\nham_filenames = [name for name in sorted(os.listdir(HAM_DIR)) if len(name) > 20]\nspam_filenames = [name for name in sorted(os.listdir(SPAM_DIR)) if len(name) > 20]\n\nprint(len(ham_filenames))\nprint(len(spam_filenames))\n\n\ndef load_email(is_spam, filename, spam_path=SPAM_PATH):\n directory = \"spam\" if is_spam else \"easy_ham\"\n with open(os.path.join(spam_path, directory, filename), \"rb\") as f:\n return parser.BytesParser(policy=email.policy.default).parse(f)\n\n\nham_emails = [load_email(is_spam=False, filename=name) for name in ham_filenames]\nspam_emails = [load_email(is_spam=True, filename=name) for name in spam_filenames]\n\nprint(ham_emails[4].get_content().strip())\nprint(spam_emails[5].get_content().strip())\n\n\ndef get_email_structure(email):\n if isinstance(email, str):\n return email\n payload = email.get_payload()\n if isinstance(payload, list):\n return \"multipart({})\".format(\", \".join([\n get_email_structure(sub_email)\n for sub_email in payload\n ]))\n else:\n return email.get_content_type()\n\n\ndef structures_counter(emails):\n structures = Counter()\n for email in emails:\n structure = get_email_structure(email)\n structures[structure] += 1\n return structures\n\n\nprint(structures_counter(ham_emails).most_common())\nprint('\\n')\nprint(structures_counter(spam_emails).most_common())\n\nfor header, value in spam_emails[0].items():\n print(header, \":\", value)\n\n\ndef html_to_plain_text(html):\n text = re.sub('<head.*?>.*?</head>', '', html, flags=re.M | re.S | re.I)\n text = re.sub('<a\\s.*?>', ' httpaddr ', text, flags=re.M | re.S | re.I)\n text = re.sub('<.*?>', '', text, flags=re.M | re.S)\n text = re.sub(r'(\\s*\\n)+', '\\n', text, flags=re.M | re.S)\n return unescape(text)\n\n\nhtml_spam_emails = [email for email in spam_emails\n if get_email_structure(email) == \"text/html\"]\n\nsample_html_spam = html_spam_emails[7]\nprint(\"\\nSpam email html sample:\\n\")\nprint(sample_html_spam.get_content().strip()[:1000], \"...\")\nprint(\"\\nEmail content: \\n\")\nprint(html_to_plain_text(sample_html_spam.get_content())[:1000], \"...\")\n\n\ndef email_to_text(email):\n html = None\n for part in email.walk():\n ctype = part.get_content_type()\n if not ctype in (\"text/plain\", \"text/html\"):\n continue\n try:\n content = part.get_content()\n except: # in case of encoding issues\n content = str(part.get_payload())\n if ctype == 
\"text/plain\":\n return content\n else:\n html = content\n if html:\n return html_to_plain_text(html)\n\n\nprint(email_to_text(sample_html_spam)[:100], \"...\")\n\ntry:\n stemmer = nltk.PorterStemmer()\n for word in (\"Computations\", \"Computation\", \"Computing\", \"Computed\", \"Compute\", \"Compulsive\"):\n print(word, \"=>\", stemmer.stem(word))\nexcept ImportError:\n print(\"Error: stemming requires the NLTK module.\")\n stemmer = None\n\ntry:\n url_extractor = urlextract.URLExtract()\n print(url_extractor.find_urls(\"Will it detect github.com and https://youtu.be/7Pq-S557XQU?t=3m32s\"))\nexcept ImportError:\n print(\"Error: replacing URLs requires the urlextract module.\")\n url_extractor = None\n\n\nclass EmailToWordCounterTransformer(BaseEstimator, TransformerMixin):\n def __init__(self, strip_headers=True, lower_case=True, remove_punctuation=True,\n replace_urls=True, replace_numbers=True, stemming=True):\n self.strip_headers = strip_headers\n self.lower_case = lower_case\n self.remove_punctuation = remove_punctuation\n self.replace_urls = replace_urls\n self.replace_numbers = replace_numbers\n self.stemming = stemming\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n X_transformed = []\n for email in X:\n text = email_to_text(email) or \"\"\n if self.lower_case:\n text = text.lower()\n text = re.sub(\"[$]+\", \" dollar \", text)\n text = re.sub(\"[^\\s]+@[^\\s]+\", \" emailaddr \", text)\n if self.replace_urls and url_extractor is not None:\n urls = list(set(url_extractor.find_urls(text)))\n urls.sort(key=lambda url: len(url), reverse=True)\n for url in urls:\n text = text.replace(url, \" httpaddr \")\n if self.replace_numbers:\n text = re.sub(r'\\d+(?:\\.\\d*(?:[eE]\\d+))?', 'NUMBER', text)\n if self.remove_punctuation:\n text = re.sub(r'\\W+', ' ', text, flags=re.M)\n special_chars = [\n \"<\", \"[\", \"^\", \">\", \"+\", \"?\", \"!\", \"'\", \".\", \",\", \":\",\n \"*\", \"%\", \"#\", \"_\", \"=\"\n ]\n for char in special_chars:\n text = text.replace(str(char), \"\")\n word_counts = Counter(text.split())\n if self.stemming and stemmer is not None:\n stemmed_word_counts = Counter()\n for word, count in word_counts.items():\n stemmed_word = stemmer.stem(word)\n stemmed_word_counts[stemmed_word] += count\n word_counts = stemmed_word_counts\n X_transformed.append(word_counts)\n return np.array(X_transformed)\n\n\nvocab = EmailToWordCounterTransformer().fit_transform(spam_emails)\nvocab = sum(vocab, Counter())\n\nlist = vocab.most_common(1904)\nvocab = []\nfor (k, v) in list:\n vocab.append(k)\n\nvocab = sorted(vocab)\n\n# SAVE DICTIONARY\ni = 0\nwith open('../data/vocab2.txt', 'w') as f:\n for item in vocab:\n try:\n f.write(\"%s\\t%s\\n\" % (i, item))\n i += 1\n except:\n print('error')\n\nsamples = len(ham_filenames) + len(spam_filenames)\n\nvocabList = open('../data/vocab2.txt', \"r\").read()\nvocabList = vocabList.split(\"\\n\")\nvocabList_d = {}\nfor ea in vocabList:\n if ea:\n [value, key] = ea.split(\"\\t\")\n vocabList_d[key] = value\n\nprint(vocabList_d)\nprint(email_to_text(spam_emails[0]))\n\n\ndef process_email(email_contents):\n \"\"\"\n Preprocesses the body of an email and returns a list of indices of the words contained in the email.\n \"\"\"\n # a - Lower case\n email_contents = email_contents.lower()\n\n # b - remove html/xml tags\n email_contents = re.sub(\"<[^>]*>\", \" \", email_contents).split(\" \")\n email_contents = filter(len, email_contents)\n email_contents = ' '.join(email_contents)\n\n # c - Handle URLS\n 
email_contents = re.sub(\"[http|https]://[^\\s]*\", \"httpaddr\", email_contents)\n\n # d - Handle Email Addresses\n email_contents = re.sub(\"[^\\s]+@[^\\s]+\", \"emailaddr\", email_contents)\n\n # e - Handle numbers\n email_contents = re.sub(\"[0-9]+\", \"number\", email_contents)\n\n # f - Handle $ sign\n email_contents = re.sub(\"[$]+\", \"dollar\", email_contents)\n\n # Strip all special characters\n special_chars = [\n \"<\", \"[\", \"^\", \">\", \"+\", \"?\", \"!\", \"'\", \".\", \",\", \":\",\n \"*\", \"%\", \"#\", \"_\", \"=\"\n ]\n for char in special_chars:\n email_contents = email_contents.replace(str(char), \"\")\n email_contents = email_contents.replace(\"\\n\", \" \")\n\n # Stem the word\n ps = PorterStemmer()\n email_contents = [ps.stem(token) for token in email_contents.split(\" \")]\n email_contents = \" \".join(email_contents)\n\n return email_contents\n\n\ndef find_word_indices(processed_email, vocabList_d):\n # Process the email and return word_indices\n\n word_indices = []\n\n for char in processed_email.split():\n if len(char) > 1 and char in vocabList_d:\n word_indices.append(int(vocabList_d[char]))\n\n return word_indices\n\n\ndef email_features(word_indices, vocabList_d):\n \"\"\"\n Takes in a word_indices vector and produces a feature vector from the word indices.\n \"\"\"\n n = len(vocabList_d)\n\n features = np.zeros((n, 1))\n\n for i in word_indices:\n features[i] = 1\n\n return features\n\n\ndef transform_email_to_features(email_contents, vocabList_d):\n # print(email_contents)\n processed_email = process_email(email_contents)\n word_indices = find_word_indices(processed_email, vocabList_d)\n features = email_features(word_indices, vocabList_d)\n\n return features\n\n\n# train\nX = []\nY = []\n\nprint(len(spam_emails))\nprint(len(ham_emails))\n\nfor i in range(400):\n sp = email_to_text(spam_emails[i])\n if sp:\n a = transform_email_to_features(sp, vocabList_d)\n X.append(a.flatten())\n Y.append(1)\nfor i in range(2000):\n em = email_to_text(ham_emails[i])\n if em:\n X.append(transform_email_to_features(em, vocabList_d).flatten())\n Y.append(0)\n\nsio.savemat('../data/myTrain.mat', {'X': X, 'y': Y})\n\n# test\nX = []\nY = []\n\nfor i in range(401, 500, 1):\n sp = email_to_text(spam_emails[i])\n if sp:\n a = transform_email_to_features(sp, vocabList_d)\n X.append(a.flatten())\n Y.append(1)\nfor i in range(2001, 2500, 1):\n em = email_to_text(ham_emails[i])\n if em:\n X.append(transform_email_to_features(em, vocabList_d).flatten())\n Y.append(0)\n\nsio.savemat('../data/myTest.mat', {'Xtest': X, 'ytest': Y})\n"
] | [
[
"numpy.array",
"scipy.io.savemat",
"numpy.zeros"
]
] |
ZhaoJY1/nussl | [
"af7d0c50e01d107f4ef3305b89eb130d95d0a7cd"
] | [
"tests/ml/test_overfit.py"
] | [
"from nussl import ml, datasets, evaluation\nimport tempfile\nfrom torch import optim\nimport numpy as np\nimport logging\nimport os\nimport torch\nfrom matplotlib import pyplot as plt\n\nlogging.basicConfig(\n format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',\n datefmt='%Y-%m-%d:%H:%M:%S',\n level=logging.INFO\n)\n\nfix_dir = 'tests/local/trainer'\n\n\ndef test_overfit_a(mix_source_folder):\n tfms = datasets.transforms.Compose([\n datasets.transforms.PhaseSensitiveSpectrumApproximation(),\n datasets.transforms.ToSeparationModel(),\n datasets.transforms.Cache('~/.nussl/tests/cache', overwrite=True),\n datasets.transforms.GetExcerpt(400)\n ])\n dataset = datasets.MixSourceFolder(\n mix_source_folder, transform=tfms)\n\n ml.train.cache_dataset(dataset)\n dataset.cache_populated = True\n\n dataloader = torch.utils.data.DataLoader(\n dataset, shuffle=True, batch_size=len(dataset), num_workers=2)\n\n # create the model, based on the first item in the dataset\n # second bit of the shape is the number of features\n n_features = dataset[0]['mix_magnitude'].shape[1]\n mi_config = ml.networks.builders.build_recurrent_mask_inference(\n n_features, 50, 1, False, 0.0, 2, 'sigmoid',\n )\n\n model = ml.SeparationModel(mi_config)\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n if device == 'cuda':\n epoch_length = 100\n else:\n epoch_length = 10\n model = model.to(device)\n # create optimizer\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\n\n loss_dictionary = {\n 'L1Loss': {\n 'weight': 1.0\n }\n }\n\n train_closure = ml.train.closures.TrainClosure(\n loss_dictionary, optimizer, model)\n val_closure = ml.train.closures.ValidationClosure(\n loss_dictionary, model)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n _dir = fix_dir if fix_dir else tmpdir\n os.makedirs(os.path.join(_dir, 'plots'), exist_ok=True)\n\n trainer, validator = ml.train.create_train_and_validation_engines(\n train_closure, val_closure, device=device\n )\n\n # add handlers to engine\n ml.train.add_stdout_handler(trainer, validator)\n ml.train.add_validate_and_checkpoint(\n _dir, model, optimizer, dataset,\n trainer, val_data=dataloader, validator=validator)\n ml.train.add_tensorboard_handler(_dir, trainer)\n\n # run engine\n trainer.run(dataloader, max_epochs=5, epoch_length=epoch_length)\n\n model_path = os.path.join(\n trainer.state.output_folder, 'checkpoints', 'best.model.pth')\n state_dict = torch.load(\n model_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(state_dict['state_dict'])\n\n history = state_dict['metadata']['trainer.state.epoch_history']\n\n for key in history:\n plt.figure(figsize=(10, 4))\n plt.title(f\"epoch:{key}\")\n plt.plot(np.array(history[key]).reshape(-1, ))\n plt.savefig(os.path.join(\n trainer.state.output_folder, 'plots',\n f\"epoch:{key.replace('/', ':')}.png\"))\n\n"
] | [
[
"torch.load",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"torch.cuda.is_available",
"numpy.array"
]
] |
Wason1/Rows-to-columns-consolidator | [
"21ee991ba907ba61708d24acfc6d5b3a3e754677"
] | [
"script.py"
] | [
"#Import Libs\nimport pandas as pd\n\n# Inputs\n#col_name = input('What is the name of the column to convert to columns?: ')\n#keyz = input('what are the names of the columns that uniquely identify a row? (seperate these with pipes \"|\"):')\n#keyz.split('|')\nfile_dir = r'data.xlsx'\noutput_dir = r'data-out.xlsx'\nlist_key_cols = ['Person - Medical Record Number', 'CE-Verified DT/TM']\n# This is the column that you want to return the unique items and make those the unique iterms the headings for new columns\nsplit_col = 'Clinical Event'\n# Use this data to fill in the new columns\nfiller_col = 'Clinical Event Result'\n\n# Create Dataframe\ndf = pd.read_excel(\n file_dir\n )\n\n#convert to strings\ndf = df.applymap(str)\n# Make the primary key\nseries_primary_key = df[list_key_cols].sum(1)\ndf['primary-key']=series_primary_key\n# new base dataframe\ndfa = df.drop(\n [split_col, filler_col],\n axis='columns'\n )\n# drop duplicates\ndfa.drop_duplicates(\n keep='first',\n inplace=True,\n)\n\ndfa.set_index(keys='primary-key', inplace=True)\n\n# new columns\nnew_cols = pd.unique(df[split_col])\nfor item in new_cols:\n dfa[item]=''\n\n# Iterate and fill in dfa\nfor a_key in series_primary_key:\n for col_name in new_cols:\n df_temp = df.loc[df['primary-key'] == a_key]\n df_temp = df_temp.loc[df_temp[split_col] == col_name]\n df_temp.reset_index(inplace=True)\n try:\n cell_text = df_temp.at[0, filler_col]\n dfa.at[a_key, col_name] = cell_text\n except:\n print('1')\n\n# Export dataframe\ndfa.to_excel(output_dir, index = False)"
] | [
[
"pandas.unique",
"pandas.read_excel"
]
] |
ltqusst/lpc_vocoder | [
"baf29d40dcf9f4b80a73146dca939c7841045441"
] | [
"sws.py"
] | [
"## MIT License\n\n# Copyright (c) 2017 John Williamson\n# Copyright (c) 2008 Cournapeau David\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport numpy as np\nimport scipy, scipy.io, scipy.io.wavfile, scipy.signal\nimport os\nfrom pathlib import Path\nimport argparse\n\n# This function is copied directly from https://github.com/cournape/talkbox/blob/master/scikits/talkbox/linpred/py_lpc.py\n# Copyright (c) 2008 Cournapeau David\n# (MIT licensed)\ndef levinson_1d(r, order):\n \"\"\"Levinson-Durbin recursion, to efficiently solve symmetric linear systems\n with toeplitz structure.\n\n Parameters\n ---------\n r : array-like\n input array to invert (since the matrix is symmetric Toeplitz, the\n corresponding pxp matrix is defined by p items only). Generally the\n autocorrelation of the signal for linear prediction coefficients\n estimation. The first item must be a non zero real.\n\n Notes\n ----\n This implementation is in python, hence unsuitable for any serious\n computation. Use it as educational and reference purpose only.\n\n Levinson is a well-known algorithm to solve the Hermitian toeplitz\n equation:\n\n _ _\n -R[1] = R[0] R[1] ... R[p-1] a[1]\n : : : : * :\n : : : _ * :\n -R[p] = R[p-1] R[p-2] ... R[0] a[p]\n _\n with respect to a ( is the complex conjugate). 
Using the special symmetry\n in the matrix, the inversion can be done in O(p^2) instead of O(p^3).\n \"\"\"\n r = np.atleast_1d(r)\n if r.ndim > 1:\n raise ValueError(\"Only rank 1 are supported for now.\")\n\n n = r.size\n if n < 1:\n raise ValueError(\"Cannot operate on empty array !\")\n elif order > n - 1:\n raise ValueError(\"Order should be <= size-1\")\n\n if not np.isreal(r[0]):\n raise ValueError(\"First item of input must be real.\")\n elif not np.isfinite(1 / r[0]):\n raise ValueError(\"First item should be != 0\")\n\n # Estimated coefficients\n a = np.empty(order + 1, r.dtype)\n # temporary array\n t = np.empty(order + 1, r.dtype)\n # Reflection coefficients\n k = np.empty(order, r.dtype)\n\n a[0] = 1.0\n e = r[0]\n\n for i in range(1, order + 1):\n acc = r[i]\n for j in range(1, i):\n acc += a[j] * r[i - j]\n k[i - 1] = -acc / e\n a[i] = k[i - 1]\n\n for j in range(order):\n t[j] = a[j]\n\n for j in range(1, i):\n a[j] += k[i - 1] * np.conj(t[i - j])\n\n e *= 1 - k[i - 1] * np.conj(k[i - 1])\n\n return a, e, k\n\n\nfrom numpy.polynomial import polynomial as P\n\n\ndef lsp_to_lpc(lsp):\n \"\"\"Convert line spectral pairs to LPC\"\"\"\n ps = np.concatenate((lsp[:, 0], -lsp[::-1, 0], [np.pi]))\n qs = np.concatenate((lsp[:, 1], [0], -lsp[::-1, 1]))\n\n p = np.cos(ps) - np.sin(ps) * 1.0j\n q = np.cos(qs) - np.sin(qs) * 1.0j\n\n p = np.real(P.polyfromroots(p))\n q = -np.real(P.polyfromroots(q))\n\n a = 0.5 * (p + q)\n return a[:-1]\n\n\ndef lpc_noise_synthesize(lpc, samples=10000):\n \"\"\"Apply LPC coefficients to white noise\"\"\"\n phase = np.random.uniform(0, 0.5, (samples))\n signal = scipy.signal.lfilter([1.0], lpc, phase)\n return signal\n\n\ndef lpc_buzz_synthesize(lpc, f, sr, samples=10000):\n \"\"\"Apply LPC coefficients to a sawtooth with the given frequency and sample rate\"\"\"\n phase = scipy.signal.sawtooth(2 * np.pi * f * np.arange(samples) / (sr))\n signal = scipy.signal.lfilter([1.0], lpc, phase)\n return signal\n\n\ndef lpc_to_lsp(lpc):\n \"\"\"Convert LPC to line spectral pairs\"\"\"\n l = len(lpc) + 1\n a = np.zeros((l,))\n a[0:-1] = lpc\n p = np.zeros((l,))\n q = np.zeros((l,))\n for i in range(l):\n j = l - i - 1\n p[i] = a[i] + a[j]\n q[i] = a[i] - a[j]\n\n ps = np.sort(np.angle(np.roots(p)))\n qs = np.sort(np.angle(np.roots(q)))\n lsp = np.vstack([ps[: len(ps) // 2], qs[: len(qs) // 2]]).T\n return lsp\n\n\ndef lpc_to_formants(lpc, sr):\n \"\"\"Convert LPC to formants \n \"\"\"\n\n # extract roots, get angle and radius\n roots = np.roots(lpc)\n\n pos_roots = roots[np.imag(roots) >= 0]\n if len(pos_roots) < len(roots) // 2:\n pos_roots = list(pos_roots) + [0] * (len(roots) // 2 - len(pos_roots))\n if len(pos_roots) > len(roots) // 2:\n pos_roots = pos_roots[: len(roots) // 2]\n\n w = np.angle(pos_roots)\n a = np.abs(pos_roots)\n\n order = np.argsort(w)\n w = w[order]\n a = a[order]\n\n freqs = w * (sr / (2 * np.pi))\n bws = -0.5 * (sr / (2 * np.pi)) * np.log(a)\n\n # exclude DC and sr/2 frequencies\n return freqs, bws\n\n\ndef load_wave(fname):\n \"\"\"Load a 16 bit wave file and return normalised in 0,1 range.\n Convert stereo WAV to mono by simple averaging. \"\"\"\n # load and return a wave file\n sr, wave = scipy.io.wavfile.read(fname)\n # convert to mono\n if len(wave.shape) > 1:\n wave = np.mean(wave, axis=1)\n return wave / 32768.0, sr\n\n\ndef lpc(wave, order):\n \"\"\"Compute LPC of the waveform. 
\n a: the LPC coefficients\n e: the total error\n k: the reflection coefficients\n \n Typically only a is required.\n \"\"\"\n # only use right half of autocorrelation, normalised by total length\n autocorr = scipy.signal.correlate(wave, wave)[len(wave) - 1 :] / len(wave)\n a, e, k = levinson_1d(autocorr, order)\n return a, e, k\n\n\ndef modfm_buzz(samples, f, sr, k):\n \"\"\"Generate a pulse train using modfm:\n y(t) = cos(x(t)) * exp(cos(x(t))*k - k)\n \n samples: number of samples to generate\n f: base frequency (Hz)\n sr: sample rate (Hz)\n k: modulation depth; higher has more harmonics but increases risk of aliasing\n (e.g. k=1000 for f=50, k=100 for f=200, k=2 for f=4000) \n \n \"\"\"\n t = np.arange(samples)\n phase = f * 2 * np.pi * (t / float(sr))\n # simple pulse oscillator (ModFM)\n buzz = np.cos(phase) * np.exp(np.cos(phase) * k - k)\n return buzz\n\n\ndef noise(samples):\n \"\"\"Generate white noise in range [-1,1]\n \n samples: number of samples to generate\n \"\"\"\n return np.random.uniform(-1, 1, size=samples)\n\n\ndef lpc_vocode(\n wave,\n frame_len,\n order,\n carrier,\n residual_amp=0.0,\n vocode_amp=1.0,\n env=False,\n freq_shift=1.0,\n):\n \"\"\"\n Apply LPC vocoding to a pair of signals using 50% overlap-add Hamming window resynthesis\n The modulator `wave` is applied to the carrier `imposed`\n \n Parameters:\n ---\n wave: modulator wave\n frame_len: length of frames\n order: LPC order (typically 2-30)\n carrier: carrier signal; should be at least as long as wave\n residual_amp: amplitude of LPC residual to include in output\n vocode_amp: amplitude of vocoded signal \n env: if True, the original volume envelope of wave is imposed on the output\n otherwise, no volume modulation is applied\n freq_shift: (default 1.0) shift the frequency of the resonances by the given scale factor. 
Warning :\n values >1.1 are usually unstable, and values <0.5 likewise.\n \"\"\"\n\n # precompute the hamming window\n window = scipy.signal.hann(frame_len)\n t = np.arange(frame_len)\n # allocate the array for the output\n vocode = np.zeros(len(wave + frame_len))\n last = np.zeros(order)\n # 50% window steps for overlap-add\n for i in range(0, len(wave), frame_len // 2):\n # slice the wave\n wave_slice = wave[i : i + frame_len]\n carrier_slice = carrier[i : i + frame_len]\n if len(wave_slice) == frame_len:\n # compute LPC\n a, error, reflection = lpc(wave_slice, order)\n\n # apply shifting in LSP space\n lsp = lpc_to_lsp(a)\n lsp = (lsp * freq_shift + np.pi) % (np.pi) - np.pi\n a = lsp_to_lpc(lsp)\n\n # compute the LPC residual\n residual = scipy.signal.lfilter(a, 1.0, wave_slice)\n # filter, using LPC as the *IIR* component\n # vocoded, last = scipy.signal.lfilter([1.], a, carrier_slice, zi=last)\n vocoded = scipy.signal.lfilter([1.0], a, carrier_slice)\n\n # match RMS of original signal\n if env:\n voc_amp = 1e-5 + np.sqrt(np.mean(vocoded ** 2))\n wave_amp = 1e-5 + np.sqrt(np.mean(wave_slice ** 2))\n vocoded = vocoded * (wave_amp / voc_amp)\n\n # Hann window 50%-overlap-add to remove clicking\n vocode[i : i + frame_len] += (\n vocoded * vocode_amp + residual * residual_amp\n ) * window\n\n return vocode[: len(wave)]\n\n\ndef get_formants(wave, frame_len, order, sr=44100, use_lsp=False):\n \"\"\"Plot the formants of the given wave form.\n \n Parameters:\n wave: Signal to analyse, as a 1D matrix\n frame_len: Length of analysis window, in samples\n order: Order of the LPC analysis performed\n sr: Sample rate, in Hz\n use_lsp: If True, use the LSP formant estimation instead of direct LPC\n \n Plots both the formant trace and the relative RMS power of the residual signal.\n \"\"\"\n formants = []\n formant_bw = []\n times = []\n res_rms = []\n env = []\n for i in range(0, len(wave), frame_len // 2):\n # slice the wave\n wave_slice = wave[i : i + frame_len]\n if len(wave_slice) == frame_len:\n # compute LPC\n a, error, reflection = lpc(wave_slice, order)\n\n # either use LSP (freq from mean angle, bw from spacing)\n if use_lsp:\n lsp = lpc_to_lsp(a)\n\n formants.append(-np.mean(lsp, axis=1) * (sr / (2 * np.pi)))\n formant_bw.append(0.5 * np.diff(lsp, axis=1)[:, 0] * (sr / (2 * np.pi)))\n\n else:\n # or use roots of LPC directly\n freq, bw = lpc_to_formants(a, sr)\n formants.append(freq)\n formant_bw.append(bw)\n\n times.append(i / float(sr))\n\n # compute the LPC residual\n residual = scipy.signal.lfilter(a, 1.0, wave_slice)\n rms = np.sqrt(np.mean(wave_slice ** 2))\n residual_rms = np.sqrt(np.mean(residual ** 2))\n res_rms.append(residual_rms)\n env.append(rms)\n\n return (\n np.array(times),\n np.array(formants),\n np.array(formant_bw),\n np.array(res_rms),\n np.array(env),\n )\n\n\ndef sinethesise(wave, frame_len, order, sr=44100, use_lsp=False, noise=1.0):\n times, formants, formant_bw, res_rms, env_rms = get_formants(\n wave, frame_len, order, sr, use_lsp\n )\n synthesize = np.zeros_like(wave)\n window = scipy.signal.hann(frame_len)\n t = np.arange(frame_len)\n k = 0\n for i in range(0, len(wave), frame_len // 2):\n\n if len(synthesize[i : i + frame_len]) == frame_len:\n # noise component\n syn_slice = (\n np.random.normal(0, 1, frame_len) * (res_rms[k] / env_rms[k]) * noise\n )\n\n # resonances\n for band in range(formants.shape[1]):\n freq = formants[k, band]\n bw = formant_bw[k, band]\n amp = 50.0 / (bw) # weight sines by inverse bandwidth\n syn_slice += np.sin(freq * (t + i) / (sr / 
(2 * np.pi))) * amp\n\n synthesize[i : i + frame_len] += window * syn_slice * env_rms[k]\n k += 1\n return synthesize\n\n\ndef bp_filter_and_decimate(x, low, high, fs, decimate=1):\n b, a = scipy.signal.butter(4, Wn=[low, high], btype=\"band\", fs=fs)\n decimated = scipy.signal.filtfilt(b, a, x)[::decimate]\n return decimated\n\ndef normalize(x):\n return x / np.max(x) \n\n\ndef upsample(x, factor):\n return scipy.signal.resample_poly(x, factor, 1)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"input_wav\", help=\"The input file, as a WAV file; ideally 44.1KHz mono.\"\n )\n parser.add_argument(\n \"output_wav\",\n nargs=\"?\",\n help=\"The output file to write to; defaults to <input>_sws.wav\",\n default=None,\n )\n parser.add_argument(\"--lp\", help=\"Lowpass filter cutoff\", type=float, default=100)\n parser.add_argument(\"--hp\", help=\"Highpass filter cutoff\", type=float, default=3000)\n parser.add_argument(\n \"--order\", \"-o\", help=\"LPC order; number of components in synthesis\", default=5, type=int\n )\n parser.add_argument(\n \"--use_lsp\",\n \"-l\",\n help=\"LPC order; number of components in synthesis\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--decimate\", \"-d\", help=\"Sample rate decimation before analysis\", default=4, type=int\n )\n parser.add_argument(\n \"--window\",\n \"-w\",\n type=int,\n help=\"LPC window size; smaller means faster changing signal; larger is smoother\",\n default=300,\n )\n parser.add_argument(\n \"--sine\",\n \"-s\",\n help=\"Resynthesise using sinewave speech (default)\",\n action=\"store_true\",\n default=True,\n )\n parser.add_argument(\n \"--buzz\",\n \"-b\",\n help=\"Resynthesie using buzz at given frequency (Hz)\",\n default=None,\n )\n parser.add_argument(\n \"--noise\", \"-n\", help=\"Resynthesize using filtered white noise\", action=\"store_true\"\n )\n\n args = parser.parse_args()\n\n args.output_wav = (\n args.output_wav or os.path.splitext(args.input_wav)[0] + \"_sws.wav\"\n )\n\n input_path = Path(args.input_wav)\n output_path = Path(args.output_wav)\n\n if not input_path.exists():\n print(f\"Cannot open {args.input_wav} for reading.\")\n exit(-1)\n\n \n wav, fs = load_wave(input_path)\n print(f\"Read {input_path}\")\n\n wav_filtered = normalize(bp_filter_and_decimate(\n wav, args.lp, args.hp, fs, decimate=args.decimate\n ))\n if args.sine:\n modulated = sinethesise(\n wav_filtered,\n frame_len=args.window,\n order=args.order,\n use_lsp=args.use_lsp,\n sr=fs / args.decimate,\n noise=0.0,\n )\n if args.buzz or args.noise:\n\n if args.buzz:\n N = 12 * np.log2(float(args.buzz)/440.0) + 69\n \n k = np.exp(-0.1513*N) + 15.927 # ModFM k values from: http://mural.maynoothuniversity.ie/4104/1/VL_New_perspectives.pdf\n \n carrier = modfm_buzz(len(wav_filtered), f=np.full(len(wav_filtered), args.buzz, dtype=np.float64),\n sr=float(fs/args.decimate), k=np.full(len(wav_filtered), k*k))\n if args.noise:\n carrier = np.random.normal(0,1,len(wav_filtered))\n\n modulated = lpc_vocode(wav_filtered, frame_len=args.window, order=args.order,\n carrier=carrier, residual_amp=0, vocode_amp=1, env=True, freq_shift=1)\n\n # un-decimate, normalize and write out\n up_modulated = normalize(upsample(modulated, args.decimate))\n \n scipy.io.wavfile.write(output_path, fs, up_modulated)\n print(f\"Wrote {output_path}\")\n"
] | [
[
"numpy.diff",
"numpy.argsort",
"numpy.log",
"scipy.signal.correlate",
"scipy.signal.resample_poly",
"numpy.isfinite",
"scipy.signal.butter",
"numpy.abs",
"scipy.io.wavfile.write",
"numpy.cos",
"scipy.io.wavfile.read",
"numpy.mean",
"numpy.random.uniform",
"scipy.signal.hann",
"numpy.zeros",
"numpy.random.normal",
"scipy.signal.filtfilt",
"numpy.arange",
"numpy.roots",
"numpy.max",
"numpy.polynomial.polynomial.polyfromroots",
"numpy.zeros_like",
"numpy.empty",
"numpy.conj",
"numpy.exp",
"numpy.atleast_1d",
"scipy.signal.lfilter",
"numpy.isreal",
"numpy.angle",
"numpy.array",
"numpy.sin",
"numpy.concatenate",
"numpy.imag"
]
] |
mokshagna517/recommendation_sys | [
"bc8ced225dff3c93d619ff5da363f42d0aa0676c"
] | [
"venv/Lib/site-packages/pandas/tests/extension/test_categorical.py"
] | [
"\"\"\"\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\n\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\n\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\n\"\"\"\nimport string\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Categorical\nfrom pandas.api.types import CategoricalDtype\nfrom pandas.tests.extension import base\nimport pandas.util.testing as tm\n\n\ndef make_data():\n while True:\n values = np.random.choice(list(string.ascii_letters), size=100)\n # ensure we meet the requirements\n # 1. first two not null\n # 2. first and second are different\n if values[0] != values[1]:\n break\n return values\n\n\[email protected]\ndef dtype():\n return CategoricalDtype()\n\n\[email protected]\ndef data():\n \"\"\"Length-100 array for this type.\n\n * data[0] and data[1] should both be non missing\n * data[0] and data[1] should not gbe equal\n \"\"\"\n return Categorical(make_data())\n\n\[email protected]\ndef data_missing():\n \"\"\"Length 2 array with [NA, Valid]\"\"\"\n return Categorical([np.nan, \"A\"])\n\n\[email protected]\ndef data_for_sorting():\n return Categorical([\"A\", \"B\", \"C\"], categories=[\"C\", \"A\", \"B\"], ordered=True)\n\n\[email protected]\ndef data_missing_for_sorting():\n return Categorical([\"A\", None, \"B\"], categories=[\"B\", \"A\"], ordered=True)\n\n\[email protected]\ndef na_value():\n return np.nan\n\n\[email protected]\ndef data_for_grouping():\n return Categorical([\"a\", \"a\", None, None, \"b\", \"b\", \"a\", \"c\"])\n\n\nclass TestDtype(base.BaseDtypeTests):\n pass\n\n\nclass TestInterface(base.BaseInterfaceTests):\n @pytest.mark.skip(reason=\"Memory usage doesn't match\")\n def test_memory_usage(self, data):\n # Is this deliberate?\n super().test_memory_usage(data)\n\n\nclass TestConstructors(base.BaseConstructorsTests):\n pass\n\n\nclass TestReshaping(base.BaseReshapingTests):\n def test_ravel(self, data):\n # GH#27199 Categorical.ravel returns self until after deprecation cycle\n with tm.assert_produces_warning(FutureWarning):\n data.ravel()\n\n\nclass TestGetitem(base.BaseGetitemTests):\n skip_take = pytest.mark.skip(reason=\"GH-20664.\")\n\n @pytest.mark.skip(reason=\"Backwards compatibility\")\n def test_getitem_scalar(self, data):\n # CategoricalDtype.type isn't \"correct\" since it should\n # be a parent of the elements (object). 
But don't want\n # to break things by changing.\n super().test_getitem_scalar(data)\n\n @skip_take\n def test_take(self, data, na_value, na_cmp):\n # TODO remove this once Categorical.take is fixed\n super().test_take(data, na_value, na_cmp)\n\n @skip_take\n def test_take_negative(self, data):\n super().test_take_negative(data)\n\n @skip_take\n def test_take_pandas_style_negative_raises(self, data, na_value):\n super().test_take_pandas_style_negative_raises(data, na_value)\n\n @skip_take\n def test_take_non_na_fill_value(self, data_missing):\n super().test_take_non_na_fill_value(data_missing)\n\n @skip_take\n def test_take_out_of_bounds_raises(self, data, allow_fill):\n return super().test_take_out_of_bounds_raises(data, allow_fill)\n\n @pytest.mark.skip(reason=\"GH-20747. Unobserved categories.\")\n def test_take_series(self, data):\n super().test_take_series(data)\n\n @skip_take\n def test_reindex_non_na_fill_value(self, data_missing):\n super().test_reindex_non_na_fill_value(data_missing)\n\n @pytest.mark.skip(reason=\"Categorical.take buggy\")\n def test_take_empty(self, data, na_value, na_cmp):\n super().test_take_empty(data, na_value, na_cmp)\n\n @pytest.mark.skip(reason=\"test not written correctly for categorical\")\n def test_reindex(self, data, na_value):\n super().test_reindex(data, na_value)\n\n\nclass TestSetitem(base.BaseSetitemTests):\n pass\n\n\nclass TestMissing(base.BaseMissingTests):\n @pytest.mark.skip(reason=\"Not implemented\")\n def test_fillna_limit_pad(self, data_missing):\n super().test_fillna_limit_pad(data_missing)\n\n @pytest.mark.skip(reason=\"Not implemented\")\n def test_fillna_limit_backfill(self, data_missing):\n super().test_fillna_limit_backfill(data_missing)\n\n\nclass TestReduce(base.BaseNoReduceTests):\n pass\n\n\nclass TestMethods(base.BaseMethodsTests):\n @pytest.mark.skip(reason=\"Unobserved categories included\")\n def test_value_counts(self, all_data, dropna):\n return super().test_value_counts(all_data, dropna)\n\n def test_combine_add(self, data_repeated):\n # GH 20825\n # When adding categoricals in combine, result is a string\n orig_data1, orig_data2 = data_repeated(2)\n s1 = pd.Series(orig_data1)\n s2 = pd.Series(orig_data2)\n result = s1.combine(s2, lambda x1, x2: x1 + x2)\n expected = pd.Series(\n ([a + b for (a, b) in zip(list(orig_data1), list(orig_data2))])\n )\n self.assert_series_equal(result, expected)\n\n val = s1.iloc[0]\n result = s1.combine(val, lambda x1, x2: x1 + x2)\n expected = pd.Series([a + val for a in list(orig_data1)])\n self.assert_series_equal(result, expected)\n\n @pytest.mark.skip(reason=\"Not Applicable\")\n def test_fillna_length_mismatch(self, data_missing):\n super().test_fillna_length_mismatch(data_missing)\n\n def test_searchsorted(self, data_for_sorting):\n if not data_for_sorting.ordered:\n raise pytest.skip(reason=\"searchsorted requires ordered data.\")\n\n\nclass TestCasting(base.BaseCastingTests):\n pass\n\n\nclass TestArithmeticOps(base.BaseArithmeticOpsTests):\n def test_arith_series_with_scalar(self, data, all_arithmetic_operators):\n\n op_name = all_arithmetic_operators\n if op_name != \"__rmod__\":\n super().test_arith_series_with_scalar(data, op_name)\n else:\n pytest.skip(\"rmod never called when string is first argument\")\n\n def test_add_series_with_extension_array(self, data):\n ser = pd.Series(data)\n with pytest.raises(TypeError, match=\"cannot perform\"):\n ser + data\n\n def test_divmod_series_array(self):\n # GH 23287\n # skipping because it is not implemented\n pass\n\n def 
_check_divmod_op(self, s, op, other, exc=NotImplementedError):\n return super()._check_divmod_op(s, op, other, exc=TypeError)\n\n\nclass TestComparisonOps(base.BaseComparisonOpsTests):\n def _compare_other(self, s, data, op_name, other):\n op = self.get_op_from_name(op_name)\n if op_name == \"__eq__\":\n result = op(s, other)\n expected = s.combine(other, lambda x, y: x == y)\n assert (result == expected).all()\n\n elif op_name == \"__ne__\":\n result = op(s, other)\n expected = s.combine(other, lambda x, y: x != y)\n assert (result == expected).all()\n\n else:\n with pytest.raises(TypeError):\n op(data, other)\n\n\nclass TestParsing(base.BaseParsingTests):\n pass\n"
] | [
[
"pandas.util.testing.assert_produces_warning",
"pandas.Series",
"pandas.api.types.CategoricalDtype",
"pandas.Categorical"
]
] |
nicpittman/tropical_pacific_carbon_export | [
"eacd3e0382616388f418eb21cad859fe7ae0144a"
] | [
"9z_ENSO_spatial_maps.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 1 10:40:28 2020\n@author: Nic Pittman\n\nThis code will reproduce Figure 4 in Pittman et al., 2021. \n\nTrends and pvalues are calculated on the fly and not saved anywhere, however could be done easily. \nregridded data is required for this process\n\nThis results in a slower script but works well. All of the processing occurs in the main function.\nEasy to call modified version of this figure.\n\nProduces mean, trend and pval (Stipples) for the following:\n \n figs/Figure4_Spatial_map_update_'+ratio.name+'.png\n \n air-sea flux\n new production \n difference is calculated here\n SST\n TPCA chlorophyll (regridded) \n carbon (as processed into grams)\n \nRequires: \n datasets/co2/landschutzer_co2/spco2_MPI-SOM_FFN_v2020.nc\n processed/seamask.nc\n processed/flux/fratios.nc\n \n processed/flux/avg_npp_rg_cafe.nc'\n processed/flux/tpca.nc\n datasets/sst/sst.mnmean.nc\n processed/flux/pco2grams.nc\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport matplotlib.pyplot as plt\nfrom carbon_math import *\nfrom mpl_toolkits.basemap import Basemap\nfrom scipy.stats import linregress\n\n \ndef plot_basemap():\n m = Basemap(llcrnrlon=120.,llcrnrlat=-15,urcrnrlon=290,urcrnrlat=15.01,\n resolution='l',projection='merc',fix_aspect=False)\n m.drawcoastlines()\n m.fillcontinents()\n # draw parallels # labels = [left,right,top,bottom]\n m.drawparallels(np.arange(-20,21,10),labels=[1,0,1,1],fontsize=12,latmax=20)\n m.drawmeridians(np.arange(-180,180,30),labels=[0,0,0,1],fontsize=12)\n return m\n\n\n\ndef plot_basemap_row(fig,axn,hovmol,units,title,units_tr,levs=None,levs_trend=None,trend_conversion=None,sb1=7,sb2=3,cmap='viridis',cmaptr='RdBu_r',wu=None,wv=None):\n '''\n Create a plotting function to make it repeatable and nicer\n colormaps should either be viridis or RdBu_r\n axis (number) will be 1,3,5,7 (plots both avg and trend at once)\n \n Unfortunately this function does the processing of mean, trends and pvals on the fly.\n Could save these if needed, but not provided here. 
\n '''\n fr=0.03\n fs=12\n ms=10\n startday=np.datetime64('2000-01-01')\n \n if title.endswith('pCO2t'):\n endday=np.datetime64('2016-12-01') \n print(title)\n elif title.endswith('chlorophyll'):\n endday=np.datetime64('2017-12-01')\n else:\n endday=np.datetime64('2020-01-01') \n \n ax1=fig.add_subplot(sb1,sb2,axn)\n m=plot_basemap()\n\n lo,la=np.meshgrid(hovmol.lon.values,hovmol.lat.values)\n lo1,la1=m(lo,la)\n \n if type(levs_trend)==type(None):\n f=m.contourf(lo1,la1,hovmol.mean(dim='time'),cmap=cmap) #11 colors\n #Quick anti-aliasing fix as per: https://stackoverflow.com/questions/15822159/aliasing-when-saving-matplotlib-filled-contour-plot-to-pdf-or-eps\n for c in f.collections:\n c.set_edgecolor(\"face\")\n else:\n if title=='TPCA Chlorophyll':\n f=m.contourf(lo1,la1,hovmol.mean(dim='time'),extend='max',cmap=cmap,levels=levs) #11 colors\n #Quick anti-aliasing fix as per: https://stackoverflow.com/questions/15822159/aliasing-when-saving-matplotlib-filled-contour-plot-to-pdf-or-eps\n for c in f.collections:\n c.set_edgecolor(\"face\")\n else:\n f=m.contourf(lo1,la1,hovmol.mean(dim='time'),cmap=cmap,levels=levs) #11 colors\n #Quick anti-aliasing fix as per: https://stackoverflow.com/questions/15822159/aliasing-when-saving-matplotlib-filled-contour-plot-to-pdf-or-eps\n for c in f.collections:\n c.set_edgecolor(\"face\")\n ax1.axhline(0,c='k',linestyle=':')\n\n moorings=[165,190,205,220,235,250]\n for x in moorings:\n x1,y1=m(x,0)\n ax1.plot(x1,y1,marker='x',c='k',markersize=ms)\n \n if title=='SST':\n \n lev=28.5#29.2 #rather than 28.5\n early_sst=hovmol.sel(time=slice('1997-01-01','2002-01-01')).mean(dim='time')#.where(co2.seamask==1)\n late_sst=hovmol.sel(time=slice('2015-01-01','2020-01-01')).mean(dim='time')#.where(co2.seamask==1)\n \n m.contour(lo1,la1,early_sst,levels=[lev],linestyles='dotted',colors='k')\n \n m.contour(lo1,la1,late_sst,levels=[lev],linestyles='solid',colors='k')\n m.contour(lo1,la1,hovmol.mean(dim='time'),levels=[25],linestyles='dashed',colors='k')\n \n \n #wu['lon'],wu['lat']=m(lo,la,wu.lon.values,wu.lat.values)\n #No windspeed vectors now\n #if title=='Wind speed':\n # skip=(slice(None,None,4),slice(None,None,4)) #2 for NCEP 2\n # m.quiver(lo1[skip],la1[skip],wu.mean(dim='time')[skip]/2,wv.mean(dim='time')[skip]/2,scale=90,headwidth=4.5)#,minshaft=2)\n\n\n cb=plt.colorbar(f,ax=ax1,fraction=fr)\n cb.set_label(units,fontsize=fs)\n cb.ax.tick_params(labelsize=fs-1)\n ax1.set_title(chr(ord('`')+axn)+') Average: '+title,fontsize=fs)\n ax1.tick_params(labelsize=fs)\n\n #Trends\n hovmol=hovmol.where(hovmol!=-0.9999,np.nan)\n hm=hovmol.interpolate_na(dim='time').sel(time=slice(startday,endday))\n months=hm.time\n \n dt_dates=pd.to_numeric(months.values.astype('datetime64[D]'))\n num_dates=dt_dates\n hm['time']=num_dates\n\n \n \n\n\n#Functions above make plotting easy.\n# # Code begins\n \n# %%Load data in\n \n#landsch_fp='datasets/co2/landschutzer_co2/spco2_MPI_SOM-FFN_v2018.nc'\nlandsch_fp='datasets/co2/landschutzer_co2/spco2_MPI-SOM_FFN_v2020.nc'\n\n\n\nseamask=xr.open_dataset('processed/seamask.nc') #Because 2020 version doesn't have it.\nseamask= seamask.assign_coords(lon=(seamask.lon % 360)).roll(lon=(seamask.dims['lon']),roll_coords=False).sortby('lon')\t\n\n#It would be preferable to use the 2020 version,\n# landsch_fp='datasets/co2/landschutzer_co2/spco2_MPI-SOM_FFN_v2020.nc'\n#However it doesn't include seamask so we are going to need both.... 
(Unless I save the seamask)\nlandschutzer=xr.open_dataset(landsch_fp)\nlandschutzer= landschutzer.assign_coords(lon=(landschutzer.lon % 360)).roll(lon=(landschutzer.dims['lon']),roll_coords=False).sortby('lon')\t\t#EPIC 1 line fix for the dateline problem.\nland_pac=landschutzer.sel(lon=slice(120,290),lat=slice(-20,20))\nland_pac['time']=land_pac.time.astype('datetime64[M]')\nland_pac_all=landschutzer.sel(lon=slice(120,290),lat=slice(-20,20))\n\nland_pac=land_pac.fgco2_smoothed\n\natmco2=land_pac_all.atm_co2\ndco2=land_pac_all.dco2\npco2=land_pac_all.spco2_smoothed\nkw=land_pac_all.kw\n\nf_ratios=xr.open_mfdataset('processed/flux/fratios.nc')\nratio=f_ratios.laws2011a#laws2000#laws2000,laws2011a,laws2011b,henson2011\n\nnpp1=xr.open_dataset('processed/flux/avg_npp_rg_cafe.nc')\navg_npp=(npp1.avg_npp/1000)*ratio\n\nland=moles_to_carbon(land_pac)/365 #LANDSCHUTZ\n\n\ndiff=land-avg_npp\ndiff1=diff.where((diff<0.1)|(diff<-0.1),np.nan)\n\n\n# Need to combine the chlorophyll products, takes a bit of memory.\nchl=xr.open_dataset('processed/flux/tpca.nc').tpca#'sw_month.nc')\n\n#mod=xr.open_dataset('datasets/tpca/mod_month.nc')\nchl['time']=chl.time.astype('datetime64[M]')\n#mod['time']=mod.time.astype('datetime64[M]')\n#tpca=sw\n#tpca=tpca.merge(mod)\n#chl = tpca.to_array(dim='tpca').mean('tpca')\n\n#SST\nsst = xr.open_dataset('datasets/sst/sst.mnmean.nc')\nsst= sst.assign_coords(lon=(sst.lon % 360)).roll(lon=(sst.dims['lon']),roll_coords=False).sortby('lon')\t\t#EPIC 1 line fix for the dateline problem.\nsst=sst.sel(lon=slice(120,290),lat=slice(20,-20)).sst\nsst=sst.where(seamask.seamask==1)\n\npCO2 = xr.open_dataarray('processed/flux/pco2grams.nc') #_norm\nintegratedpCO2 = (pCO2*12*50)\n\n#wu=xr.open_dataset('datasets/uwnd.mon.mean.nc').sel(level=1000,lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).uwnd\n#wv=xr.open_dataset('datasets/vwnd.mon.mean.nc').sel(level=1000,lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).vwnd\nwu=xr.open_dataset('datasets/uwnd.10m.mon.mean.nc').sel(level=10,lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).uwnd\nwv=xr.open_dataset('datasets/vwnd.10m.mon.mean.nc').sel(level=10,lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).vwnd\ndco2['time']=dco2.time.astype('datetime64[M]')\n\nws=np.sqrt((wu**2)+(wv**2))\n\n\n\nprecip= xr.open_dataset('datasets/precip.mon.mean.enhanced.nc').sel(lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).precip\n\n# # THIS NEEDS TO BE RUN ONCE BUT CAN be memory intensive\n\n# w_ccmp_a=xr.open_mfdataset('datasets/ws_ccmp/*.nc') #Downloaded manually\n# w_ccmp_a['time']=w_ccmp_a.time.astype('datetime64[M]')\n# w_ccmp_a=w_ccmp_a.sel(latitude=slice(-20,20))\n\n# w_ccmp_b=xr.open_mfdataset('datasets/CCMP_winds.nc') #Bulk ErDap download\n# dt=w_ccmp_b.indexes['time'].to_datetimeindex()\n# w_ccmp_b['time']=dt\n\n# w_ccmp=xr.merge([w_ccmp_b,w_ccmp_a])\n\n\n# w_ccmp=w_ccmp.sel(longitude=slice(120,290),latitude=slice(-20,20))\n# ws_ccmp=np.sqrt((w_ccmp.uwnd**2)+(w_ccmp.vwnd**2))\n# ws_ccmp=ws_ccmp.rename({'latitude':'lat','longitude':'lon'})\n# try:\n# ws_ccmp.to_netcdf('datasets/CCMP_windspeed.nc')\n# print('saved')\n# except:\n# pass\n\nws_ccmp=xr.open_dataarray('datasets/CCMP_windspeed.nc')\n#ws_ccmp=xr.open_dataarray('processed/CCMP_ws_1deg.nc')\n\n# %% Prepare Figure 
\n\n\nlanina=pd.read_csv('processed/indexes/la_nina_events.csv')\ncp_nino=pd.read_csv('processed/indexes/cp_events.csv')\nep_nino=pd.read_csv('processed/indexes/ep_events.csv')\n\nfp='processed/combined_dataset/month_data_exports.nc'\ninfo=xr.open_mfdataset(fp).sel(Mooring=195).to_dataframe()\n\n\n#Process EP, CP and Nino events.\nnina=pd.DataFrame()\nep=pd.DataFrame()\ncp=pd.DataFrame()\nfor i in lanina.iterrows(): nina=nina.append(info[slice(i[1].start,i[1].end)])\nfor i in ep_nino.iterrows(): ep=ep.append(info[slice(i[1].start,i[1].end)])\nfor i in cp_nino.iterrows(): cp=cp.append(info[slice(i[1].start,i[1].end)])\nnina_dates=nina.index\nep_dates=ep.index[4:]\ncp_dates=cp.index\n#all_dates=chl.time\nall_dates=info.index[36:] #2000 - 2020\n\n\nfig=plt.figure(figsize=(19*2/2.54,23*2/2.54))#(figsize=(30,15))\nsb1=7\nsb2=3\n\n\n#%% EP\n\nplot_basemap_row(fig,axn=1,\n hovmol=sst.sel(time=ep_dates),\n units='Degrees C',\n title='SST',\n units_tr='Degrees C year$^{-1}$',\n levs=np.arange(20,32,1),\n levs_trend=np.arange(-0.06,0.07,0.01),\n #trend_conversion=1000,\n cmap='viridis')\n\n\nplot_basemap_row(fig,axn=4,\n hovmol=ws_ccmp.sel(time=ep_dates),\n units='m s$^{-1}$',\n title='Wind speed',\n units_tr='m s$^{-1}$ year$^{-1}$', \n levs=np.arange(0,11,1),\n levs_trend=np.arange(-0.1,0.125,0.025),\n \n #levs_trend=np.arange(-0.15,0.175,0.025),\n #trend_conversion=1000,\n cmap='viridis',\n wu=wu,wv=wv)\n\nplot_basemap_row(fig,axn=7,\n hovmol=chl.sel(time=ep_dates),\n units='mg chl m$^{-3}$ day$^{-1}$',\n title='TPCA chlorophyll',\n units_tr='ug chl m$^{-3}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,0.65,0.05),\n \n levs_trend=np.arange(-4,4.1,1),\n trend_conversion=1000,\n cmap='viridis')\n\n\nplot_basemap_row(fig,axn=10,\n hovmol=avg_npp.sel(lat=slice(-15,15),time=ep_dates),\n units='gC m$^{-2}$ day$^{-1}$',\n title='New production',\n units_tr='mgC m$^{-2}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,0.26,0.025),\n levs_trend=np.arange(-2,2.1,0.25),\n trend_conversion=1000,\n cmap='viridis')\n\nplot_basemap_row(fig,axn=13,\n hovmol=precip.sel(time=ep_dates),\n units='mm day$^{-1}$',\n title='Precipitation',\n units_tr='mm day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,13,1),\n levs_trend=np.arange(-0.08,0.085,0.005),\n #trend_conversion=1000,\n cmap='viridis')\n\n\n#Delta pCO2\nh=plot_basemap_row(fig,axn=16,\n hovmol=dco2.sel(time=ep_dates),#npp1.avg_npp,#dco2,#integratedpCO2,#monthlyPCO2*1000,\n units='μatm',\n title='\\u0394pCO$_{2}$',#'pCO21',\n units_tr='μatm year$^{-1}$',\n levs=np.arange(-15,121,10),#(200,1200,10),#(5.5,9.5,0.5),\n levs_trend=np.arange(-2.5,2.6,0.1),\n trend_conversion=1,#1000,\n cmap='viridis',\n cmaptr='RdBu_r')#'Reds')\n\nplot_basemap_row(fig,axn=19,\n hovmol=land.sel(time=ep_dates),\n units='gC m$_{-2}$ day$^{-1}$',\n title='Air-sea CO$_{2}$ flux',\n units_tr='mgC m$^{2}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(-0.14,0.15,0.02),\n levs_trend=np.arange(-2,2.1,0.5),\n trend_conversion=1000,\n cmap='RdBu_r')\n\n\n# %% CP\n\nplot_basemap_row(fig,axn=2,\n hovmol=sst.sel(time=cp_dates),\n units='Degrees C',\n title='SST',\n units_tr='Degrees C year$^{-1}$',\n levs=np.arange(20,32,1),\n levs_trend=np.arange(-0.06,0.07,0.01),\n #trend_conversion=1000,\n cmap='viridis')\n\n\nplot_basemap_row(fig,axn=5,\n hovmol=ws_ccmp.sel(time=cp_dates[:-7]),\n units='m s$^{-1}$',\n title='Wind speed',\n units_tr='m s$^{-1}$ year$^{-1}$', \n levs=np.arange(0,11,1),\n levs_trend=np.arange(-0.1,0.125,0.025),\n \n #levs_trend=np.arange(-0.15,0.175,0.025),\n #trend_conversion=1000,\n 
cmap='viridis',\n wu=wu,wv=wv)\n\nplot_basemap_row(fig,axn=8,\n hovmol=chl.sel(time=cp_dates[:-5]),\n units='mg chl m$^{-3}$ day$^{-1}$',\n title='TPCA chlorophyll',\n units_tr='ug chl m$^{-3}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,0.65,0.05),\n \n levs_trend=np.arange(-4,4.1,1),\n trend_conversion=1000,\n cmap='viridis')\n\n\nplot_basemap_row(fig,axn=11,\n hovmol=avg_npp.sel(lat=slice(-15,15),time=cp_dates),\n units='gC m$^{-2}$ day$^{-1}$',\n title='New production',\n units_tr='mgC m$^{-2}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,0.26,0.025),\n levs_trend=np.arange(-2,2.1,0.25),\n trend_conversion=1000,\n cmap='viridis')\n\nplot_basemap_row(fig,axn=14,\n hovmol=precip.sel(time=cp_dates),\n units='mm day$^{-1}$',\n title='Precipitation',\n units_tr='mm day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,13,1),\n levs_trend=np.arange(-0.08,0.085,0.005),\n #trend_conversion=1000,\n cmap='viridis')\n\n\n#Delta pCO2\nh=plot_basemap_row(fig,axn=17,\n hovmol=dco2.sel(time=cp_dates),#npp1.avg_npp,#dco2,#integratedpCO2,#monthlyPCO2*1000,\n units='μatm',\n title='\\u0394pCO$_{2}$',#'pCO21',\n units_tr='μatm year$^{-1}$',\n levs=np.arange(-15,121,10),#(200,1200,10),#(5.5,9.5,0.5),\n levs_trend=np.arange(-2.5,2.6,0.1),\n trend_conversion=1,#1000,\n cmap='viridis',\n cmaptr='RdBu_r')#'Reds')\n\nplot_basemap_row(fig,axn=20,\n hovmol=land.sel(time=cp_dates),\n units='gC m$_{-2}$ day$^{-1}$',\n title='Air-sea CO$_{2}$ flux',\n units_tr='mgC m$^{2}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(-0.14,0.15,0.02),\n levs_trend=np.arange(-2,2.1,0.5),\n trend_conversion=1000,\n cmap='RdBu_r')\n\n\n#%% NINA\n\n\nplot_basemap_row(fig,axn=3,\n hovmol=sst.sel(time=nina_dates),\n units='Degrees C',\n title='SST',\n units_tr='Degrees C year$^{-1}$',\n levs=np.arange(20,32,1),\n levs_trend=np.arange(-0.06,0.07,0.01),\n #trend_conversion=1000,\n cmap='viridis')\n\n\nplot_basemap_row(fig,axn=6,\n hovmol=ws_ccmp.sel(time=nina_dates),\n units='m s$^{-1}$',\n title='Wind speed',\n units_tr='m s$^{-1}$ year$^{-1}$', \n levs=np.arange(0,11,1),\n levs_trend=np.arange(-0.1,0.125,0.025),\n \n #levs_trend=np.arange(-0.15,0.175,0.025),\n #trend_conversion=1000,\n cmap='viridis',\n wu=wu,wv=wv)\n\nplot_basemap_row(fig,axn=9,\n hovmol=chl.sel(time=nina_dates),\n units='mg chl m$^{-3}$ day$^{-1}$',\n title='TPCA chlorophyll',\n units_tr='ug chl m$^{-3}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,0.65,0.05),\n \n levs_trend=np.arange(-4,4.1,1),\n trend_conversion=1000,\n cmap='viridis')\n\n\nplot_basemap_row(fig,axn=12,\n hovmol=avg_npp.sel(lat=slice(-15,15),time=nina_dates),\n units='gC m$^{-2}$ day$^{-1}$',\n title='New production',\n units_tr='mgC m$^{-2}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,0.26,0.025),\n levs_trend=np.arange(-2,2.1,0.25),\n trend_conversion=1000,\n cmap='viridis')\n\nplot_basemap_row(fig,axn=15,\n hovmol=precip.sel(time=nina_dates),\n units='mm day$^{-1}$',\n title='Precipitation',\n units_tr='mm day$^{-1}$ year$^{-1}$',\n levs=np.arange(0,13,1),\n levs_trend=np.arange(-0.08,0.085,0.005),\n #trend_conversion=1000,\n cmap='viridis')\n\n\n#Delta pCO2\nh=plot_basemap_row(fig,axn=18,\n hovmol=dco2.sel(time=nina_dates),#npp1.avg_npp,#dco2,#integratedpCO2,#monthlyPCO2*1000,\n units='μatm',\n title='\\u0394pCO$_{2}$',#'pCO21',\n units_tr='μatm year$^{-1}$',\n levs=np.arange(-15,121,10),#(200,1200,10),#(5.5,9.5,0.5),\n levs_trend=np.arange(-2.5,2.6,0.1),\n trend_conversion=1,#1000,\n cmap='viridis',\n cmaptr='RdBu_r')#'Reds')\n\nplot_basemap_row(fig,axn=21,\n hovmol=land.sel(time=nina_dates),\n units='gC 
m$_{-2}$ day$^{-1}$',\n title='Air-sea CO$_{2}$ flux',\n units_tr='mgC m$^{2}$ day$^{-1}$ year$^{-1}$',\n levs=np.arange(-0.14,0.15,0.02),\n levs_trend=np.arange(-2,2.1,0.5),\n trend_conversion=1000,\n cmap='RdBu_r')\n\n\n\n\n\n\nplt.tight_layout()\n# plt.savefig('figs/Figure3_Spatial_map_'+ratio.name+etype+'.png',dpi=100)\n# plt.savefig('figs/vector/Figure3_Spatial_map_'+ratio.name+etype+'.eps')\n# plt.savefig('figs/vector/Figure3_Spatial_map_'+ratio.name+etype+'.pdf')\n\n# try:\n# plt.savefig('figs/Figure3_Spatial_map_'+ratio.name+'.jpeg',dpi=300)\n# except:\n# pass\n# plt.show()\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"numpy.arange",
"numpy.datetime64",
"numpy.sqrt",
"matplotlib.pyplot.colorbar",
"numpy.meshgrid"
]
] |
slowy07/medical-BCDU | [
"dab1ddcacbe093b78e6830d52db2a4e6fabc3d52"
] | [
"lungSegmentation/RFunction.py"
] | [
"from __future__ import division\nimport numpy as np\nfrom scipy.ndimage.morphology import binary_erosion, binary_fill_holes\n\ndef hu_to_grayscale(volume):\n volume = np.clip(volume, -512, 512)\n mxmal = np.max(volume)\n mnval = np.min(volume)\n im_volume = (volume - mnval) / max(mxval - mnval, 1e-3)\n im_volume = im_volume\n \n return im_volume* 255\n\ndef get_mask_lung(vol):\n vol_im = np.where(vol > 0, 1, 0)\n shp = vol.shape\n around_img = np.zeros((shp[0], shp[1], shp[2]), dtype = np.float32)\n for idx in range(shp[0]):\n around_lung[idx, :, :] = binary_erosion(vol_im[idx], structure = np.ones((15, 15))).astype(vol_im.dtype)\n\n return around_lung\n\ndef get_mask(segmentation):\n # initialize ouput to zero\n shp = segmentation.shape\n lung = np.zeros((shp[0], shp[1], shp[2]), dtype = np.float32)\n\n lung[np.equal(segmentation, 255)] = 255\n \n return lung\n\ndef get_FOV(around_lung, lung):\n FOV = np.where((around_lung + lung) > 0, 1, 0)\n for idx in range(FOV.shape[0]):\n FOV[idx, :, :] = binary_fill_holes(FOV[idx, :, :], structure = np.ones((5, 5))).astype(FOV.dtype)\n \n return FOV\n\ndef return_axials(vol, seg):\n vol = vol.get_data()\n seg = seg.get_data()\n seg = seg.astype(np.int32)\n\n # convert to visual format\n vol_ims = hu_to_grayscale(vol_ims)\n lung = get_mask(seg)\n around_lung = get_mask_lung(vol_ims)\n FOV = get_FOV(around_lung, lung)\n \n around_lung = np.where((FOV - lung) > 0, 1, 0)\n \n return vol_ims, lung, around_lung, FOV"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.equal",
"numpy.clip",
"numpy.max",
"numpy.min",
"numpy.where"
]
] |
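A minimal smoke test for the lung-mask helpers in the row above; the module name RFunction, the synthetic shapes, and the label value are assumptions for illustration, and numpy/scipy must be installed:

import numpy as np
from RFunction import hu_to_grayscale, get_mask, get_mask_lung, get_FOV

vol = np.random.randint(-1000, 1000, size=(4, 64, 64))  # fake HU volume
seg = np.zeros((4, 64, 64), dtype=np.int32)
seg[:, 20:40, 20:40] = 255                               # fake lung label

vol_ims = hu_to_grayscale(vol)        # grayscale volume scaled to [0, 255]
lung = get_mask(seg)                  # voxels labelled 255
around_lung = get_mask_lung(vol_ims)  # per-slice eroded body mask
FOV = get_FOV(around_lung, lung)      # hole-filled union of both masks
print(vol_ims.shape, lung.max(), FOV.sum())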
jlsuarezdiaz/pyDML-Stats | [
"495de64dbcda73ce20d8e916bf5e5077a8dae98a"
] | [
"scripts/utils/toy_datasets.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 26 12:25:25 2018\n\nToy datasets.\n\n@author: jlsuarezdiaz\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom six.moves import xrange\nfrom sklearn.preprocessing import LabelEncoder\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import(\n load_iris, load_digits)\n\ndef toy_plot(X,y):\n f, ax = plt.subplots(figsize=(12,9))\n \n plt.axis('equal')\n plt.scatter(X[:,0],X[:,1],c=y,cmap=\"rainbow\",label=y)\n \n #cmap = plt.get_cmap('rainbow')\n #cc = np.unique(y)\n #cn = len(cc)\n #for i,c in enumerate(cc):\n # print(i,c)\n # ind = np.where(y == c)[0]\n # print(ind)\n # XX = X[ind]\n # print(cmap(i/(cn+1)))\n # ax.scatter(XX[:,0],XX[:,1],c=cmap(i/(cn+1)),label=c)\n #plt.legend()\n \n plt.show()\n return plt\n\ndef circular_toy_dataset(rads = [1,2], samples = [200,200], noise = [0.2,0.2], seed = None):\n if seed is not None:\n np.random.seed(seed)\n \n n = sum(samples)\n d = 2\n X = np.empty([n,d])\n y = np.empty([n])\n le = LabelEncoder()\n le.fit(rads)\n \n acum = 0\n for j,s in enumerate(samples):\n for i in xrange(s):\n ns1 = noise[j]*np.random.randn()\n ns2 = noise[j]*np.random.randn()\n x1 = (rads[j]+ns1)*np.cos(2*np.pi*i/s)\n x2 = (rads[j]+ns2)*np.sin(2*np.pi*i/s)\n \n X[acum+i,:] = [x1,x2]\n y[acum+i] = rads[j]\n \n acum += s\n y = le.transform(y)\n \n return X,y\n\ndef hiperplane_toy_dataset(ws = [[1,1],[1,-1]],bs = [[0,0],[0,0]],nsamples=800,xrange=[-1,1],yrange=[-1,1], noise = 0.1,seed = None):\n if seed is not None:\n np.random.seed(seed)\n \n n=nsamples\n d=2\n X = np.random.rand(n,d)\n y = np.zeros([n])\n yy = np.empty([n,len(ws)])\n \n X[:,0] = (xrange[1]-xrange[0])*X[:,0]+xrange[0]\n X[:,1] = (yrange[1]-yrange[0])*X[:,1]+yrange[0]\n \n for j, (w, b) in enumerate(zip(ws,bs)):\n w = np.matrix(w)\n b = np.matrix(b)\n ns = noise*np.random.randn(n,2)\n yy[:,j] = np.sign(((X+ns)-b).dot(w.T)).reshape([n])\n \n yy[yy==-1]=0\n yy = yy.astype(int)\n\n for i in range(n):\n for j, u in enumerate(yy[i,:]):\n y[i] += (u << j)\n \n return X,y\n \ndef iris2d_toy_dataset(dims=[0,2]):\n data=load_iris() # IRIS\n X=data['data']\n X=X[:,dims]\n y=data['target']\n return X,y\n\ndef balls_toy_dataset(centers = [[-2,-2],[0,0],[2,2],[2,-2],[-2,2]],rads = [1.4,1.4,1.4,1.4,1.4],samples=[200,200,200,200,200],noise = [0.3,0.3,0.3,0.3,0.3],seed=None):\n if seed is not None:\n np.random.seed(seed)\n \n n = sum(samples)\n d=2\n \n X=np.empty([n,d])\n y=np.empty([n])\n \n acum=0\n for j, s in enumerate(samples):\n rs = rads[j]*np.random.rand(s)\n angs = 2*np.pi*np.random.rand(s)\n ns = noise[j]*np.random.rand(s)\n c = np.array(centers[j])\n \n for i in xrange(s):\n X[acum+i,:] = c +ns[i] + rs[i]*np.array([np.cos(angs[i]),np.sin(angs[i])])\n y[acum+i]=j\n \n acum += s\n \n return X,y\n\ndef simetria_hor(A):\n nrow, ncol= A.shape\n A = np.abs(A-A[:,::-1]) # Diferencia con la imagen simétrica\n return np.mean(A) # Media de las diferencias (grado de simetría)\n\ndef simetria_ver(A):\n nrow, ncol= A.shape\n A = np.abs(A-A[::-1,:]) # Diferencia con la imagen simétrica\n return np.mean(A) # Media de las diferencias (grado de simetría)\n\ndef digits_toy_dataset(dims=[0,2],numbers=[0,1,2,3,4,5,6,7,8,9]):\n data=load_digits()\n XX = data['data']\n y = data['target']\n nn,dd = XX.shape\n XX = XX.reshape([nn,8,8])\n\n X = np.empty([nn,3])\n for i in xrange(nn):\n X[i,0] = simetria_hor(XX[i,:,:])\n X[i,1] = simetria_ver(XX[i,:,:])\n X[i,2] = np.mean(XX[i,:])\n \n selected = np.where(np.isin(y,numbers))[0]\n \n 
return X[selected,:][:,dims],y[selected] \n\ndef single_toy_dataset(samples=8, classes = 3, seed=None):\n X = np.empty([samples,2])\n y = np.empty([samples])\n for i in xrange(samples):\n c = np.random.randint(0,classes)\n x = np.random.rand(1,2)\n X[i,:]=x\n y[i]=c\n \n return X,y\n "
] | [
[
"numpy.random.seed",
"sklearn.preprocessing.LabelEncoder",
"sklearn.datasets.load_iris",
"numpy.abs",
"numpy.cos",
"numpy.isin",
"numpy.random.rand",
"sklearn.datasets.load_digits",
"numpy.mean",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplots",
"numpy.empty",
"numpy.matrix",
"numpy.random.randn",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.sin",
"numpy.random.randint"
]
] |
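A short usage sketch for the generators in the row above; the module name toy_datasets is an assumption, and seaborn/matplotlib must be installed for the import to succeed:

from toy_datasets import circular_toy_dataset, toy_plot

# two concentric rings of 200 points each, with Gaussian radial noise
X, y = circular_toy_dataset(rads=[1, 2], samples=[200, 200],
                            noise=[0.2, 0.2], seed=42)
print(X.shape, set(y))  # (400, 2) and two label-encoded classes
toy_plot(X, y)          # scatter plot coloured by class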
Ayushk4/tsat | [
"07f9535157e45c4b27dae7d73d199fef7fb9d37a"
] | [
"common/metrics/basic_metrics.py"
] | [
"#----------------------------------------\n#--------- Torch Related Imports --------\n#----------------------------------------\nimport torch\nimport torch.distributed as distributed\n\n#----------------------------------------\n#--------- Import Wandb Here ------------\n#----------------------------------------\nimport wandb\n\n\nclass TrainAccuracyMetric():\n\n def __init__(self, initial_value, allreduce=False, **kwargs):\n\n self.current_value = initial_value\n self.iterations = 1\n self.allreduce = allreduce\n\n def update(self, new_value):\n\n self.current_value = (self.current_value - (self.current_value-new_value)/(self.iterations + 1))\n\n # If all reduce, get the number of GPUs\n if self.allreduce:\n gpus = torch.tensor(1.0).cuda()\n\n # convert to tensor\n cv = torch.tensor(self.current_value).cuda()\n\n distributed.all_reduce(cv, op=distributed.ReduceOp.SUM)\n distributed.all_reduce(gpus, op=distributed.ReduceOp.SUM)\n\n self.current_value = cv.item()/gpus.item()\n\n self.iterations += 1\n\n def wandb_log(self, metric_name, step):\n\n wandb.log({metric_name: self.current_value}, step=step)\n\nclass TrainLossMetric():\n\n def __init__(self, initial_value, **kwargs):\n\n self.current_value = initial_value\n\n def update(self, new_value):\n\n self.current_value = new_value\n\n def wandb_log(self, metric_name, step):\n\n wandb.log({metric_name: self.current_value}, step=step)\n\nclass ValAccuracyMetric():\n\n def __init__(self, initial_value, allreduce=False, **kwargs):\n\n self.current_value = initial_value\n self.best_value = initial_value\n self.updated_best_val = True\n self.allreduce = allreduce\n\n def update(self, new_value):\n\n self.current_value = new_value\n\n # If all reduce, get the number of GPUs\n if self.allreduce:\n gpus = torch.tensor(1.0).cuda()\n\n # convert to tensor\n cv = torch.tensor(self.current_value).cuda()\n\n distributed.all_reduce(cv, op=distributed.ReduceOp.SUM)\n distributed.all_reduce(gpus, op=distributed.ReduceOp.SUM)\n\n self.current_value = cv.item()/gpus.item()\n\n if self.current_value > self.best_value:\n self.best_value = self.current_value\n self.updated_best_val = True\n else:\n self.updated_best_val = False\n\n def wandb_log(self, metric_name, step):\n\n wandb.log({f'current_{metric_name}': self.current_value, f'best_{metric_name}': self.best_value}, step=step)\n"
] | [
[
"torch.tensor",
"torch.distributed.all_reduce"
]
] |
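A tiny sanity check of the running-mean update in TrainAccuracyMetric above, with allreduce left off so no GPU or process group is needed; the module name basic_metrics is assumed, and importing it still requires torch and wandb to be installed:

from basic_metrics import TrainAccuracyMetric

m = TrainAccuracyMetric(0.5)
m.update(1.0)           # running mean over 2 iterations: 0.5 - (0.5 - 1.0)/2
print(m.current_value)  # 0.75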
iimmortall/QuantLib | [
"29e83dad8738d0fb4efb18d0cb5dd3a7029abd86"
] | [
"losses/loss_factory.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\n\n\ndef cross_entropy_dist_epoch(reduction='mean', **_):\n cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)\n l1_fn = torch.nn.L1Loss(reduction=reduction)\n\n def loss_fn(outputs, outputs_f, labels, epoch, **_):\n loss_dict = dict()\n full_gt_loss = cross_entropy_fn(outputs_f['out'], labels)\n gt_loss = cross_entropy_fn(outputs['out'], labels)\n dist_loss = 0\n layer_names = outputs.keys()\n len_layer = len(layer_names)\n\n for i, layer_name in enumerate(layer_names):\n if i == len_layer - 1:\n continue\n dist_loss += l1_fn(outputs[layer_name], outputs_f[layer_name])\n\n scale = epoch / 100\n if epoch == 100:\n scale = 1\n\n loss_dict['loss'] = scale*(gt_loss + dist_loss) + full_gt_loss\n loss_dict['gt_loss'] = gt_loss\n loss_dict['full_gt_loss'] = full_gt_loss\n return loss_dict\n\n return {'train': loss_fn, 'val': cross_entropy_fn}\n\n\ndef cross_entropy_dist(reduction='mean', **_):\n cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)\n l1_fn = torch.nn.L1Loss(reduction=reduction)\n\n def loss_fn(outputs, outputs_f, labels, **_):\n loss_dict = dict()\n full_gt_loss = cross_entropy_fn(outputs_f['out'], labels)\n gt_loss = cross_entropy_fn(outputs['out'], labels)\n dist_loss = 0\n layer_names = outputs.keys()\n len_layer = len(layer_names)\n\n for i, layer_name in enumerate(layer_names):\n if i == len_layer - 1:\n continue\n dist_loss += l1_fn(outputs[layer_name], outputs_f[layer_name])\n\n loss_dict['loss'] = gt_loss + dist_loss + full_gt_loss\n loss_dict['gt_loss'] = gt_loss\n loss_dict['full_gt_loss'] = full_gt_loss\n return loss_dict\n\n return {'train': loss_fn, 'val': cross_entropy_fn}\n\n\ndef cross_entropy(reduction='mean', **_):\n cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)\n\n def loss_fn(outputs, labels, **_):\n loss_dict = dict()\n gt_loss = cross_entropy_fn(outputs, labels)\n loss_dict['loss'] = gt_loss\n loss_dict['gt_loss'] = gt_loss\n return loss_dict\n\n return {'train': loss_fn, 'val': cross_entropy_fn}\n\n\ndef regularization(reduction='mean', **_):\n cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)\n\n def loss_fn(outputs, labels, reg_factors, **_):\n loss_dict = dict()\n gt_loss = cross_entropy_fn(outputs, labels)\n reg_loss = 0\n for i in range(len(reg_factors)):\n reg_loss += torch.mean((torch.pow(reg_factors[i]-1, 2)*torch.pow(reg_factors[i]+1, 2)))\n reg_loss = reg_loss / len(reg_factors)\n loss_dict['loss'] = gt_loss + reg_loss\n loss_dict['gt_loss'] = gt_loss\n loss_dict['reg_loss'] = reg_loss\n return loss_dict\n\n return {'train': loss_fn, 'val': cross_entropy_fn}\n\n\ndef regularization_temp(reduction='mean', **_):\n cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)\n\n def loss_fn(outputs, labels, reg_factors, **_):\n loss_dict = dict()\n gt_loss = cross_entropy_fn(outputs, labels)\n reg_loss = 0\n for i in range(len(reg_factors)):\n reg_loss += torch.mean((torch.pow(reg_factors[i]-1, 2)*torch.pow(reg_factors[i]+1, 2)))\n reg_loss = reg_loss / len(reg_factors)\n loss_dict['loss'] = gt_loss + reg_loss\n loss_dict['gt_loss'] = gt_loss\n loss_dict['reg_loss'] = reg_loss\n return loss_dict\n\n return {'train': loss_fn, 'val': cross_entropy_fn}\n\n\ndef get_loss(config):\n f = globals().get(config.loss.name)\n return f(**config.loss.params)\n"
] | [
[
"torch.nn.L1Loss",
"torch.pow",
"torch.nn.CrossEntropyLoss"
]
] |
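A minimal sketch of how these factory functions are consumed; get_loss expects a config whose loss.name matches one of the functions above, but the factories can also be called directly (the module name loss_factory and the shapes below are illustrative assumptions):

import torch
from loss_factory import cross_entropy  # module name assumed

losses = cross_entropy(reduction='mean')  # {'train': loss_fn, 'val': CrossEntropyLoss}
outputs = torch.randn(8, 10)              # batch of logits
labels = torch.randint(0, 10, (8,))
loss_dict = losses['train'](outputs, labels)
print(loss_dict['loss'].item(), loss_dict['gt_loss'].item())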
Payuing/evoDNN | [
"79b727d5062a27d3f8e95f175c509613f52e58aa"
] | [
"legacy/src/EvoNN.py"
] | [
"\"\"\"\r\nA module to implement the evolutionary algorithm for\r\na feedforward neural network.\r\nCrossover and mutation\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import print_function\r\n\r\nimport sys\r\nimport math\r\nimport csv\r\nimport warnings\r\nimport numpy as np\r\nimport random\r\nimport copy\r\nfrom datetime import datetime\r\nwarnings.filterwarnings(\"ignore\") # never print matching warnings\r\nsys.path.append(\"/Users/Payu/Desktop/EvoNN_package/EvoNN_DNN\") #thrid party's libararies, absolute path\r\n\r\n\"\"\"Constant\"\"\"\r\nNUM_LAYERS = 5 # Assertion test number of layers\r\n\r\n\"\"\"Activation function\"\"\"\r\ndef sigmoid(x):\r\n\treturn 1/(1+np.exp(-x))\r\n\r\ndef tanh(x):\r\n\treturn np.tanh(x)\r\n\r\n\"\"\"Loss function\"\"\"\r\ndef RMSE(y_predicted, y_true):\r\n\ty_predicted = y_predicted.reshape((y_predicted.shape[0],))\r\n\treturn np.sqrt(np.mean((y_predicted - y_true)**2))\r\n\r\n\"\"\"Return predicted value array\"\"\"\r\ndef Identity(final_layer_values):\r\n\treturn final_layer_values[:]\r\n\r\nclass Evolver:\r\n\tdef __init__(\tself,\r\n\t\t\t\t\tG=10,\t\t\t\t\t\t\t\t# Maximum iteration\r\n\t\t\t\t\tearly_stopping=10,\t\t\t\t\t# Minimum iteration\r\n\t\t\t\t\tnode_per_layer = [10],\t\t\t\t# Number of nodes per layer\r\n\t\t\t\t\tMU=10,\t\t\t\t\t\t\t\t# Number of parents\r\n\t\t\t\t\tLAMBDA=10,\t\t\t\t\t\t\t# Number of offspring\r\n\t\t\t\t\tP_m=0.1,\t\t\t\t\t\t\t# Weight mutation probability\r\n\t\t\t\t\tP_mf=0.1,\t\t\t\t\t\t\t# Function mutation probablity\r\n\t\t\t\t\tR_m=1.0,\t\t\t\t\t\t\t# Weight mutation radius\r\n\t\t\t\t\tP_c=0.5,\t\t\t\t\t\t\t# Crossover proportion\r\n\t\t\t\t\tP_b=0.01,\t\t\t\t\t\t\t# Bias mutation probablity\r\n\t\t\t\t\tR_b=1.0,\t\t\t\t\t\t\t\t# Bias mutation radius\r\n\t\t\t\t\telitism=True,\t\t\t\t\t\t# Elitism involves copying a small proportion of the fittest candidates, unchanged, into the next generation.\r\n\t\t\t\t\ttournament_size=2,\t\t\t\t\t# Selecting individuals from a population\r\n\t\t\t\t\tfitness_function=RMSE,\r\n\t\t\t\t\tfinal_activation_function=Identity,\r\n\t\t\t\t\tadditional_functions=[],\r\n\t\t\t\t\trandom_state=None,\r\n\t\t\t\t\tverbose=0):\r\n\r\n\t\tself.generation_number = G\r\n\t\tself.early_stopping = early_stopping\r\n\t\tself.node_per_layer = node_per_layer\r\n\t\tself.mu = MU\r\n\t\tself.lam = LAMBDA\r\n\t\tself.P_M = P_m\r\n\t\tself.P_MF = P_mf\r\n\t\tself.P_C = P_c\r\n\t\tself.R_M = R_m\r\n\t\tself.P_B = P_b\r\n\t\tself.R_B = R_b\r\n\t\tself.ELITISM = elitism\r\n\t\tself.TOURNAMENT_SIZE = tournament_size\r\n\t\tself.fitness = fitness_function\r\n\t\tself.final_activation = final_activation_function\r\n\t\tself.functions = {0: sigmoid,\r\n 1: tanh}\t\t\t\t\t\t# Using a dictionary to select function\r\n\t\tif (random_state is not None):\r\n\t\t\ttime_seconds = int(datetime.now().timestamp())\t# Python 3.3+ only\r\n\t\t\tnp.random.seed(random_state + time_seconds)\t\t# add system time to generate random number\r\n\t\t\trandom.seed(random_state + time_seconds)\r\n\t\tself.verbose = verbose\r\n\t\tself.final_population = None\r\n\t\tself.best_individual = None\r\n\r\n\t\tkey = len(self.functions)\t# add additional activation functions\r\n\t\tfor additional_function in additional_functions:\r\n\t\t\tself.functions[key] = additional_function\r\n\t\t\tkey += 1\r\n\t\tprint(\"Network has {} layers, they are {}.\".format(len(self.node_per_layer), self.node_per_layer))\r\n\r\n 
######################################################################################\r\n\t\"\"\"Train the EvoNN\"\"\"\r\n\tdef fit(self, X_train, Y_train, X_val = None, Y_val = None):\r\n\t\t#initialization\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Input is a {} X {} matrix\".format(X_train.shape[0], X_train.shape[1]))\r\n\t\t\tif (X_val is not None):\r\n\t\t\t\tprint(\"Validation is a {} X {} matrix\".format(X_val.shape[0], X_val.shape[1]))\r\n\t\tself.X_train = X_train\r\n\t\tself.Y_train = Y_train\r\n\t\tself.X_val = X_val\r\n\t\tself.Y_val = Y_val\r\n\t\tself.feature_number = X_train.shape[1]\r\n\t\ttry:\r\n\t\t\tself.output_number = Y_train.shape[1]\t# more than one column\r\n\t\texcept IndexError:\r\n\t\t\tself.output_number = 1\r\n\t\toffspring = []\r\n\t\tpopulation = self.initialize_population() # \"mu\" used at start to create a population pool of networks\r\n\t\taverage_fitness, average_fitness_validate, best_fitness_train, best_fitness_validate, best_individual = self.evaluate_population(population)\r\n\t\tvalidate_timer = 0\r\n\t\tbest_fitness_validate_of_all_generations = best_fitness_validate # the smaller the better\r\n\t\tbest_individual_validate = best_individual\r\n\r\n\t\tcurr_generation_number = 1\r\n\t\twhile ((curr_generation_number < self.generation_number + 1) and (self.early_stopping > validate_timer)):\r\n\t\t\tif (curr_generation_number % 5 == 0):\r\n\t\t\t\tprint(\"run for {} generations\".format(curr_generation_number))\r\n\r\n\t\t\tif (self.verbose >= 1):\r\n\t\t\t\tprintout_statement = \"Generation \"+str(curr_generation_number)\r\n\t\t\t\tprintout_statement += \"\\tTrain \"\r\n\t\t\t\tprintout_statement += \"\\tbest fitness train: \"+str(best_fitness_train)\r\n\t\t\t\tif (self.X_val is not None):\r\n\t\t\t\t\tprintout_statement += \"\\tValidate \"\r\n\t\t\t\t\tprintout_statement += \"\\tbest fitness: \"+str(best_fitness_validate_of_all_generations)\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Please specify a validation set.\")\r\n\t\t\t\t\texit()\r\n\t\t\t\tprint(printout_statement)\r\n\r\n\t\t\toffspring = self.make_offspring(population) # a list of offspring; use lam to generate\r\n\t\t\tfor theOffspring in offspring:\r\n\t\t\t\ttheOffspring.mutate(self.P_M, self.P_MF, self.R_M, self.P_B, self.R_B)\r\n\t\t\tpopulation = [] # generate new population\r\n\r\n\t\t\t# Only one best individual\r\n\t\t\tif (self.ELITISM == True):\r\n\t\t\t\tcopy_individual = EvoNN.copyIndividual(best_individual)\r\n\t\t\t\tpopulation.append(copy_individual)\r\n\t\t\t\tinit_range = 1\r\n\t\t\telse:\r\n\t\t\t\tinit_range = 0\r\n\r\n\t\t\t\"\"\"Generate next parent generation\"\"\"\r\n\t\t\tfor i in range(init_range, self.mu):\r\n\t\t\t\ttheOriginal = self.tournament_selection(offspring, self.TOURNAMENT_SIZE)\r\n\t\t\t\tcopy_individual = EvoNN.copyIndividual(theOriginal)\r\n\t\t\t\tpopulation.append(copy_individual)\r\n\r\n\t\t\taverage_fitness, average_fitness_validate, best_fitness_train, best_fitness_validate, best_individual = self.evaluate_population(population)\r\n\r\n\t\t\tif (self.X_val is not None):\r\n\t\t\t\tif (best_fitness_validate < best_fitness_validate_of_all_generations):\r\n\t\t\t\t\tbest_fitness_validate_of_all_generations = best_fitness_validate\r\n\t\t\t\t\tbest_individual_validate = copy.deepcopy(best_individual)\r\n\t\t\t\t\tvalidate_timer = 0\r\n\t\t\t\telse:\r\n\t\t\t\t\tvalidate_timer += 1 # if no improvement\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Please specify a validation set.\")\r\n\t\t\t\texit()\r\n\r\n\t\t\tcurr_generation_number += 
1\r\n\r\n\t\tself.best_individual = copy.deepcopy(best_individual_validate)\r\n\t\tself.final_population = copy.deepcopy(population)\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(self.best_individual)\r\n\r\n\t######################################################################################\r\n\t\"\"\"\"Predict on test dataset\"\"\"\r\n\tdef predict_proba(self, X_test):\r\n\t\treturn self.best_individual.get_output(X_test)\r\n\r\n ######################################################################################\r\n\t\"\"\"Predict on test dataset\"\"\"\r\n\tdef predict(self, X_test):\r\n\t\treturn self.best_individual.get_output(X_test)\r\n\r\n ######################################################################################\r\n\tdef initialize_population(self):\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Initializing population...\")\r\n\r\n\t\tmy_population = []\r\n\t\tfor i in range(self.mu):\r\n\t\t\ttheIndividual = EvoNN.newIndividual(self.feature_number, self.output_number, self.final_activation, hidden_size = self.node_per_layer, function_dictionary = self.functions)\r\n\t\t\tmy_population.append(theIndividual) # theIndividual is a standalone network\r\n\t\t\tif (self.verbose >= 1):\r\n\t\t\t\tprint(\"\\t\\t\\t {}\".format(my_population[i]))\r\n\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Population initialized\")\r\n\t\treturn my_population\r\n\r\n ######################################################################################\r\n\tdef evaluate_population(self, the_population):\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Evaluating population\")\r\n\r\n\t\t\"\"\"\"Initialize parameters\"\"\"\r\n\t\taverage_fitness_train = 0.0 # the whole population\r\n\t\taverage_fitness_validate = 0.0\r\n\r\n\t\tpopulation_count_train = 0\r\n\t\tpopulation_count_validate = 0\r\n\r\n\t\tbest_fitness_train = the_population[0].fitness\r\n\t\tbest_fitness_validate = the_population[0].fitness\r\n\r\n\t\tbest_individual = the_population[0]\r\n\r\n\t\tfor individual in the_population:\r\n\t\t\tY_predict = individual.get_output(self.X_train)\r\n\t\t\tfitness_value_train = self.fitness(Y_predict, self.Y_train) # Y_train is a 2d one-hot coding matrix\r\n\t\t\tindividual.fitness = fitness_value_train\r\n\r\n\t\t\tif not (math.isnan(fitness_value_train)):\r\n\t\t\t\taverage_fitness_train += fitness_value_train\r\n\t\t\t\tpopulation_count_train += 1\r\n\r\n\t\t\t\"\"\"best_fitness_train: the smaller the better\"\"\"\r\n\t\t\tif (fitness_value_train < best_fitness_train):\r\n\t\t\t\tbest_fitness_train = fitness_value_train\r\n\t\t\t\tbest_individual = individual\r\n\r\n\t\t\tif (self.X_val is not None):\r\n\t\t\t\tY_val_predict = individual.get_output(self.X_val)\r\n\t\t\t\tfitness_value_validate = self.fitness(Y_val_predict, self.Y_val)\r\n\t\t\t\taverage_fitness_validate += fitness_value_validate\r\n\t\t\t\tpopulation_count_validate += 1\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Please specify a validation dataset\")\r\n\t\t\t\texit()\r\n\r\n\t\tY_val_predict = best_individual.get_output(self.X_val)\r\n\t\tbest_fitness_validate = self.fitness(Y_val_predict, self.Y_val)\r\n\r\n\t\taverage_fitness_train /= population_count_train\r\n\t\taverage_fitness_validate /= population_count_validate\r\n\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Population evaluated\")\r\n\r\n\t\treturn average_fitness_train, average_fitness_validate, best_fitness_train, best_fitness_validate, best_individual\r\n\r\n ######################################################################################\r\n\tdef 
make_offspring(self, the_population):\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Making offspring\")\r\n\r\n\t\toffspring_population = []\r\n\t\tfor i in range(self.lam):\r\n\t\t\toffspring_population.append(self.create_offspring(the_population))\r\n\r\n\t\tif (self.verbose >= 1):\r\n\t\t\tprint(\"Made offspring\")\r\n\r\n\t\treturn offspring_population\r\n\r\n ######################################################################################\r\n\tdef create_offspring(self, the_population):\r\n\r\n\t\trandom_chance = random.random()\r\n\t\tif (random_chance <= self.P_C): # crossover proportion\r\n\t\t\tparent1 = self.tournament_selection(the_population)\r\n\t\t\tparent2 = self.tournament_selection(the_population)\r\n\t\t\ttheIndividual = EvoNN.crossoverIndividual(parent1, parent2)\r\n\t\t\tassert len(theIndividual.hidden_layer_size) == NUM_LAYERS # test number of layers is correct\r\n\t\t\treturn theIndividual\r\n\t\telse:\r\n\t\t\toriginal = self.tournament_selection(the_population)\r\n\t\t\ttheIndividual = EvoNN.copyIndividual(original)\r\n\t\t\tassert len(theIndividual.hidden_layer_size) == NUM_LAYERS # test number of layers is correct\r\n\t\t\treturn theIndividual\r\n\r\n ######################################################################################\r\n\t\"\"\"\"Tournament selection\"\"\"\r\n\tdef tournament_selection(self, the_population, tournament_size=2):\r\n\r\n\t\tpopulation_size = len(the_population)\r\n\t\tthe_tournament = []\r\n\t\tfor i in range(tournament_size):\r\n\t\t\tthe_tournament.append(the_population[random.randint(0, population_size-1)])\r\n\r\n\t\t\"\"\"Initialization\"\"\"\r\n\t\tbest_fitness = the_tournament[0].fitness\r\n\t\tbest_individual = the_tournament[0]\r\n\t\tfor i in range(1, tournament_size):\r\n\t\t\tif (the_tournament[i].fitness < best_fitness):\r\n\t\t\t\tbest_fitness = the_tournament[i].fitness\r\n\t\t\t\tbest_individual = the_tournament[i]\r\n\r\n\t\treturn best_individual\r\n\r\n##########################################################################################\r\nclass EvoNN:\r\n\r\n\tdefault_function_dictionary = {0: sigmoid,\r\n                                   1: tanh}\r\n\r\n##########################################################################################\r\n\tdef __init__(self):\r\n\t\tpass\r\n\r\n##########################################################################################\r\n\t\"\"\"\"Generate new standalone feedforward network\"\"\"\r\n\t@classmethod\r\n\tdef newIndividual(cls, input_size, output_size, final_activation_function, hidden_size=[10], function_dictionary = None):\r\n\r\n\t\ttheIndividual = cls()\t\t\t# theIndividual is a new EvoNN instance\r\n\t\tif (function_dictionary is None):\r\n\t\t\ttheIndividual.function_dictionary = cls.default_function_dictionary\t# cls, not self: this is a classmethod\r\n\t\telse:\r\n\t\t\ttheIndividual.function_dictionary = function_dictionary\r\n\t\ttheIndividual.fitness = float('inf')\t\t# initial fitness is inf\r\n\t\ttheIndividual.input_size = input_size\r\n\r\n\t\ttheIndividual.hidden_layer_size = hidden_size # number of layers, a list\r\n\t\tnum_hidden_layers = len(theIndividual.hidden_layer_size)\r\n\r\n\t\ttheIndividual.hidden_layer_bias = [] # a list of numpy 1d array\r\n\t\ttheIndividual.hidden_layer_functions = [] # a list of numpy 1d array\r\n\t\tfor node_size in hidden_size: # hidden_size is a list\r\n\t\t\ttheIndividual.hidden_layer_bias.append(np.random.uniform(size=(node_size)))\r\n\t\t\ttheIndividual.hidden_layer_functions.append(np.random.randint( len(theIndividual.function_dictionary.keys()), size=node_size 
))\r\n\r\n\t\ttheIndividual.output_size = output_size\r\n\t\ttheIndividual.final_activation = final_activation_function # softmax, probability function\r\n\r\n\t\ttheIndividual.input_to_hidden_matrix = np.random.uniform(size=(\tinput_size, hidden_size[0]))\r\n\t\tif (num_hidden_layers > 1):\r\n\t\t\ttheIndividual.hidden_to_hidden_matrix = []\r\n\t\t\tfor curr_layer in range(num_hidden_layers - 1):\r\n\t\t\t\ttheIndividual.hidden_to_hidden_matrix.append(np.random.uniform(size=(hidden_size[curr_layer], hidden_size[curr_layer + 1])))\r\n\t\ttheIndividual.hidden_to_output_matrix = np.random.uniform(size=( hidden_size[-1], output_size))\r\n\r\n\t\treturn theIndividual\r\n\r\n##########################################################################################\r\n\t@classmethod\r\n\tdef crossoverIndividual(cls, individual1, individual2):\r\n\r\n\t\ttheIndividual = cls() # the offspring individual\r\n\t\ttheIndividual.function_dictionary = individual1.function_dictionary\r\n\r\n\t\tinput_size = individual1.input_to_hidden_matrix.shape[0]\r\n\t\toutput_size = individual1.hidden_to_output_matrix.shape[1]\r\n\r\n\t\ttheIndividual.fitness = float('inf')\r\n\t\ttheIndividual.input_size = input_size\r\n\r\n\t\thidden_size = individual1.hidden_layer_size # a list array\r\n\t\tnum_hidden_layers = len(hidden_size)\r\n\r\n\t\t# generate offspring arch\r\n\t\ttheIndividual.hidden_layer_size = copy.deepcopy(hidden_size)\r\n\t\ttheIndividual.hidden_layer_bias = []\r\n\t\ttheIndividual.hidden_layer_functions = []\r\n\t\tfor node_size in hidden_size:\r\n\t\t\ttheIndividual.hidden_layer_bias.append(np.zeros(node_size))\r\n\t\t\ttheIndividual.hidden_layer_functions.append(np.zeros(node_size))\r\n\r\n\t\ttheIndividual.output_size = output_size\r\n\t\ttheIndividual.final_activation = individual1.final_activation\r\n\r\n\t\t\"\"\"crossover activation function and bias\"\"\"\r\n\t\tfor layer in range(num_hidden_layers):\r\n\t\t\t# crossover activation function\r\n\t\t\tprobablity_matrix = np.random.uniform(size=(hidden_size[layer]))\r\n\t\t\ttheIndividual.hidden_layer_functions[layer][probablity_matrix <= 0.5] = individual1.hidden_layer_functions[layer][probablity_matrix <= 0.5]\r\n\t\t\ttheIndividual.hidden_layer_functions[layer][probablity_matrix > 0.5] = individual2.hidden_layer_functions[layer][probablity_matrix > 0.5]\r\n\r\n\t\t\t# crossover bias\r\n\t\t\tprobablity_matrix = np.random.uniform(size=(hidden_size[layer]))\r\n\t\t\ttheIndividual.hidden_layer_bias[layer][probablity_matrix <= 0.5] = individual1.hidden_layer_bias[layer][probablity_matrix <= 0.5]\r\n\t\t\ttheIndividual.hidden_layer_bias[layer][probablity_matrix > 0.5] = individual2.hidden_layer_bias[layer][probablity_matrix > 0.5]\r\n\r\n\t\t\"\"\"crossover weight matrix\"\"\"\r\n\t\t# input to hidden matrix\r\n\t\ttheIndividual.input_to_hidden_matrix = np.zeros((input_size, hidden_size[0]))\r\n\t\tprobablity_matrix = np.random.uniform(size=(input_size, hidden_size[0]))\r\n\r\n\t\ttheIndividual.input_to_hidden_matrix[probablity_matrix <= 0.5] = individual1.input_to_hidden_matrix[probablity_matrix <= 0.5]\r\n\t\ttheIndividual.input_to_hidden_matrix[probablity_matrix > 0.5] = individual2.input_to_hidden_matrix[probablity_matrix > 0.5]\r\n\r\n\t\t# hidden to hidden matrix\r\n\t\tif (num_hidden_layers > 1):\r\n\t\t\ttheIndividual.hidden_to_hidden_matrix = []\r\n\t\t\tfor curr_layer in range(num_hidden_layers - 1):\r\n\t\t\t\tnew_hidden_to_hidden_matrix = np.zeros((hidden_size[curr_layer], hidden_size[curr_layer + 1]))\r\n\t\t\t\tprobablity_matrix = 
np.random.uniform(size=(hidden_size[curr_layer], hidden_size[curr_layer + 1]))\r\n\r\n\t\t\t\tnew_hidden_to_hidden_matrix[probablity_matrix <= 0.5] = individual1.hidden_to_hidden_matrix[curr_layer][probablity_matrix <= 0.5]\r\n\t\t\t\tnew_hidden_to_hidden_matrix[probablity_matrix > 0.5] = individual2.hidden_to_hidden_matrix[curr_layer][probablity_matrix > 0.5]\r\n\r\n\t\t\t\ttheIndividual.hidden_to_hidden_matrix.append(new_hidden_to_hidden_matrix)\r\n\r\n\t\t# hidden to output matrix\r\n\t\ttheIndividual.hidden_to_output_matrix = np.zeros((hidden_size[-1], output_size))\r\n\t\tprobablity_matrix = np.random.uniform(size=((hidden_size[-1], output_size)))\r\n\r\n\t\ttheIndividual.hidden_to_output_matrix[probablity_matrix <= 0.5] = individual1.hidden_to_output_matrix[probablity_matrix <= 0.5]\r\n\t\ttheIndividual.hidden_to_output_matrix[probablity_matrix > 0.5] = individual2.hidden_to_output_matrix[probablity_matrix > 0.5]\r\n\r\n\t\treturn theIndividual\r\n\r\n##########################################################################################\r\n\t\"\"\"\"Deep copy individual\"\"\"\r\n\t@classmethod\r\n\tdef copyIndividual(cls, theOriginal):\r\n\r\n\t\ttheIndividual = cls()\r\n\t\ttheIndividual.function_dictionary = theOriginal.function_dictionary\r\n\r\n\t\tinput_size = theOriginal.input_to_hidden_matrix.shape[0]\r\n\t\toutput_size = theOriginal.hidden_to_output_matrix.shape[1]\r\n\r\n\t\ttheIndividual.fitness = float('inf')\r\n\t\ttheIndividual.input_size = input_size\r\n\t\ttheIndividual.hidden_layer_size = copy.deepcopy(theOriginal.hidden_layer_size)\r\n\r\n\t\t# deep copy bias and activation function\r\n\t\ttheIndividual.hidden_layer_bias = copy.deepcopy(theOriginal.hidden_layer_bias)\r\n\t\ttheIndividual.hidden_layer_functions = copy.deepcopy(theOriginal.hidden_layer_functions)\r\n\r\n\t\ttheIndividual.output_size = output_size\r\n\t\ttheIndividual.final_activation = theOriginal.final_activation\r\n\r\n\t\t# deep copy weight matrix\r\n\t\ttheIndividual.input_to_hidden_matrix = copy.deepcopy(theOriginal.input_to_hidden_matrix)\r\n\t\tif (len(theIndividual.hidden_layer_size) > 1):\r\n\t\t\ttheIndividual.hidden_to_hidden_matrix = copy.deepcopy(theOriginal.hidden_to_hidden_matrix)\r\n\t\ttheIndividual.hidden_to_output_matrix = copy.deepcopy(theOriginal.hidden_to_output_matrix)\r\n\r\n\t\treturn theIndividual\r\n\r\n##########################################################################################\r\n\tdef mutate_matrix(self, the_matrix, probablity, radius):\r\n\r\n\t\tprobablity_matrix = np.random.uniform(size=(the_matrix.shape))\r\n\t\tmutation_matrix = np.random.uniform(low = -radius, high=radius, size=(the_matrix.shape))\r\n\t\tthe_matrix[probablity_matrix <= probablity] += mutation_matrix[probablity_matrix <= probablity]\r\n\r\n\t\treturn the_matrix\r\n\r\n##########################################################################################\r\n\tdef mutate(self, P_m, P_mf, R_m, P_b, R_b):\r\n\r\n\t\tinput_size = self.input_size\r\n\t\thidden_size= self.hidden_layer_size # a list\r\n\t\tnum_hidden_layers = len(self.hidden_layer_size)\r\n\t\toutput_size = self.hidden_to_output_matrix.shape[1]\r\n\r\n\t\t\"\"\"\"Mutate input to hidden matrix\"\"\"\r\n\t\tself.input_to_hidden_matrix = self.mutate_matrix(self.input_to_hidden_matrix, P_m, R_m)\r\n\r\n\t\t\"\"\"\"Mutate activation function and bias\"\"\"\r\n\t\tfunction_number = len(self.function_dictionary.keys())\r\n\r\n\t\tfor layer in range(num_hidden_layers):\r\n\t\t\t# mutate activation 
function\r\n\t\t\tprobablity_matrix = np.random.uniform(size=(hidden_size[layer]))\r\n\t\t\tfunction_mutation_matrix = np.random.randint(0, function_number, size=(hidden_size[layer])) # high bound is exclusive, so every function index can be drawn\r\n\t\t\tself.hidden_layer_functions[layer][probablity_matrix <= P_mf] = function_mutation_matrix[probablity_matrix <= P_mf]\r\n\r\n\t\t\t# mutate bias\r\n\t\t\tself.hidden_layer_bias[layer] = self.mutate_matrix(self.hidden_layer_bias[layer], P_b, R_b)\r\n\r\n\t\t\"\"\"Mutate hidden to hidden matrix\"\"\"\r\n\t\tif (num_hidden_layers > 1):\r\n\t\t\tfor layer in range(num_hidden_layers - 1):\r\n\t\t\t\tself.hidden_to_hidden_matrix[layer] = self.mutate_matrix(self.hidden_to_hidden_matrix[layer], P_m, R_m)\r\n\r\n\t\t\"\"\"Mutate hidden to output matrix\"\"\"\r\n\t\tself.hidden_to_output_matrix = self.mutate_matrix(self.hidden_to_output_matrix, P_m, R_m)\r\n\r\n##########################################################################################\r\n\t\"\"\"Output is a 2d (sample_size, classification_number) array\"\"\"\r\n\tdef get_output(self, X_train):\r\n\r\n\t\tsample_size = X_train.shape[0]\r\n\t\thidden_layer_input = np.dot(X_train, self.input_to_hidden_matrix) + np.tile(self.hidden_layer_bias[0], (sample_size, 1)) # y = wx+b\r\n\r\n\t\tfor i in range(hidden_layer_input.shape[1]): # z = f(wx+b)\r\n\t\t\tfunctionIndex = self.hidden_layer_functions[0][i]\r\n\t\t\tmyFunction = self.function_dictionary[functionIndex]\r\n\t\t\thidden_layer_input[:, i] = myFunction(hidden_layer_input[:, i])\r\n\r\n\t\thidden_layer_matrix = np.copy(hidden_layer_input) # deep copy\r\n\t\tif (len(self.hidden_layer_size) > 1):\r\n\t\t\tfor i in range(len(self.hidden_layer_size) - 1): # aw+b\r\n\t\t\t\thidden_layer_matrix = np.dot(hidden_layer_matrix, self.hidden_to_hidden_matrix[i]) + np.tile(self.hidden_layer_bias[i+1],(sample_size, 1)) # y = wx+b\r\n\r\n\t\t\t\tfor j in range(hidden_layer_matrix.shape[1]): # z = f(wx+b)\r\n\t\t\t\t\tfunctionIndex = self.hidden_layer_functions[i+1][j]\r\n\t\t\t\t\tmyFunction = self.function_dictionary[functionIndex]\r\n\t\t\t\t\thidden_layer_matrix[:, j] = myFunction(hidden_layer_matrix[:, j])\r\n\r\n\t\toutput_layer_input = np.dot(hidden_layer_matrix, self.hidden_to_output_matrix)\r\n\r\n\t\toutput = self.final_activation(output_layer_input)\r\n\r\n\t\treturn output\r\n"
] | [
[
"numpy.random.uniform",
"numpy.tile",
"numpy.zeros",
"numpy.random.seed",
"numpy.copy",
"numpy.exp",
"numpy.dot",
"numpy.random.randint",
"numpy.tanh",
"numpy.mean"
]
] |
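
The EvoNN code above recombines two parents by drawing an element-wise random mask over each weight matrix. Below is a minimal, self-contained sketch of that uniform-crossover idiom (the helper name and shapes are illustrative, not from the repo):

```python
# Sketch of the element-wise uniform crossover used in EvoNN.crossoverIndividual:
# entries where the mask is <= 0.5 come from parent 1, the rest from parent 2.
import numpy as np

def uniform_crossover(matrix1, matrix2):
    probability_matrix = np.random.uniform(size=matrix1.shape)
    child = np.zeros_like(matrix1)
    child[probability_matrix <= 0.5] = matrix1[probability_matrix <= 0.5]
    child[probability_matrix > 0.5] = matrix2[probability_matrix > 0.5]
    return child

parent1 = np.random.uniform(size=(4, 10))  # e.g. an input-to-hidden weight matrix
parent2 = np.random.uniform(size=(4, 10))
child = uniform_crossover(parent1, parent2)
assert child.shape == parent1.shape
```

The same masking pattern is applied per layer to the biases and to the activation-function index arrays.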
jmarine/ezeeai | [
"091b4ce3bc5794c534084bff3301b15ba8a9be1a"
] | [
"ezeeai/core/explainer.py"
] | [
"from lime import lime_tabular, lime_image\nfrom scipy.misc import imresize\nimport numpy as np\nimport tensorflow as tf\n\n\nclass TabularExplainer:\n\n def __init__(self, dataset, verbose=True):\n\n train_dataset, training_labels = dataset.make_numpy_array(dataset.get_train_file())\n\n mode = dataset.get_mode()\n categorical_features, categorical_index, categorical_names = dataset.get_categorical_features()\n unique = dataset.get_target_labels()\n\n self._mode = mode\n self.dataset = dataset\n\n self._explainer = lime_tabular.LimeTabularExplainer(train_dataset,\n feature_names=dataset.get_feature_names(),\n class_names=unique,\n categorical_features=categorical_index,\n categorical_names=categorical_names,\n training_labels=training_labels,\n verbose=verbose,\n mode=self._mode)\n\n def explain_instance(self, model, features, num_features=5, top_labels=3, sel_target=None):\n\n sample = self.dataset.create_feat_array(features)\n features = {k: features[k] for k in self.dataset.get_feature_names()}\n\n def predict_fn(x):\n x = x.reshape(-1, len(features))\n\n local_features = {k: x[:, i] for i, k in enumerate(features.keys())}\n local_features = self.dataset.from_array(local_features)\n\n predict_input_fn = tf.estimator.inputs.numpy_input_fn(x=local_features,\n y=None, num_epochs=1, shuffle=False)\n with tf.device('/cpu:0'): # TODO maybe check if gpu is free\n predictions = list(model.predict(input_fn=predict_input_fn))\n\n if self._mode == 'classification':\n return np.array([x['probabilities'] for x in predictions])\n\n if sel_target:\n tidx = self.dataset.get_targets().index(sel_target)\n return np.array([x['predictions'][tidx] for x in predictions]).reshape(-1)\n\n return np.array([x['predictions'] for x in predictions]).reshape(-1)\n\n if self._mode == 'classification':\n return self._explainer.explain_instance(sample, predict_fn, num_features=num_features,\n top_labels=top_labels)\n\n return self._explainer.explain_instance(sample, predict_fn, num_features=num_features)\n\n\nclass ImageExplainer:\n\n def __init__(self, dataset, verbose=True):\n self._dataset = dataset\n self._explainer = lime_image.LimeImageExplainer(verbose=verbose)\n\n def explain_instance(self, model, features, num_features=5, top_labels=3, sel_target=None):\n def predict_fn(x):\n x = x.astype(np.float32)\n x = np.apply_along_axis(self._dataset.normalize, 0, x)\n predict_input_fn = tf.estimator.inputs.numpy_input_fn(x=x, y=None, num_epochs=1, shuffle=False)\n with tf.device('/cpu:0'): # TODO maybe check if gpu is free\n probabilities = list(model.predict(input_fn=predict_input_fn))\n return np.array([x['probabilities'] for x in probabilities])\n\n features = imresize(features, self._dataset.get_image_size(), interp='bilinear')\n\n explain_result = self._explainer.explain_instance(features, predict_fn, batch_size=100,\n num_features=num_features,\n labels=self._dataset.get_class_names(),\n top_labels=len(self._dataset.get_class_names()))\n\n features = features.astype(np.float32)\n\n features = self._dataset.normalize(features)\n\n predict_input_fn = tf.estimator.inputs.numpy_input_fn(x=features[np.newaxis, ...], y=None, num_epochs=1,\n shuffle=False)\n with tf.device('/cpu:0'): # TODO maybe check if gpu is free\n predictions = list(model.predict(input_fn=predict_input_fn))\n\n return explain_result, predictions[0]['probabilities']\n"
] | [
[
"numpy.array",
"tensorflow.estimator.inputs.numpy_input_fn",
"tensorflow.device",
"numpy.apply_along_axis"
]
] |
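
For context, here is a self-contained sketch of the LimeTabularExplainer pattern that the TabularExplainer above wraps; a scikit-learn classifier stands in for the TensorFlow estimator, and the data and feature names are invented for illustration:

```python
import numpy as np
from lime import lime_tabular
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(0)
X_train = rng.normal(size=(200, 4))                        # synthetic training data
y_train = (X_train[:, 0] + X_train[:, 1] > 0).astype(int)

model = RandomForestClassifier(n_estimators=10, random_state=0).fit(X_train, y_train)

explainer = lime_tabular.LimeTabularExplainer(
    X_train,
    feature_names=["f0", "f1", "f2", "f3"],
    class_names=["neg", "pos"],
    mode="classification",
)

# As with predict_fn above, the callable must map an (n_samples, n_features)
# array to per-class probabilities.
explanation = explainer.explain_instance(
    X_train[0], model.predict_proba, num_features=2, top_labels=1
)
print(explanation.as_list(label=explanation.available_labels()[0]))
```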
murthyn/composer | [
"2a04cf387dd8558556500f7ef2bc6d3d131043d5"
] | [
"composer/models/resnets.py"
] | [
"# Copyright 2021 MosaicML. All Rights Reserved.\n\n\"\"\"The CIFAR ResNet torch module.\n\nSee the :doc:`Model Card </model_cards/resnet>` for more details.\n\"\"\"\n\n# Code below adapted from https://github.com/facebookresearch/open_lth\n# and https://github.com/pytorch/vision\n\nfrom typing import List, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom composer.models import Initializer\n\n__all__ = [\"CIFAR_ResNet\"]\n\n\nclass CIFAR_ResNet(nn.Module):\n \"\"\"A residual neural network as originally designed for CIFAR-10.\"\"\"\n\n class Block(nn.Module):\n \"\"\"A ResNet block.\"\"\"\n\n def __init__(self, f_in: int, f_out: int, downsample: bool = False):\n super(CIFAR_ResNet.Block, self).__init__()\n\n stride = 2 if downsample else 1\n self.conv1 = nn.Conv2d(f_in, f_out, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(f_out)\n self.conv2 = nn.Conv2d(f_out, f_out, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(f_out)\n self.relu = nn.ReLU(inplace=True)\n\n # No parameters for shortcut connections.\n if downsample or f_in != f_out:\n self.shortcut = nn.Sequential(\n nn.Conv2d(f_in, f_out, kernel_size=1, stride=2, bias=False),\n nn.BatchNorm2d(f_out),\n )\n else:\n self.shortcut = nn.Sequential()\n\n def forward(self, x: torch.Tensor):\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n return self.relu(out)\n\n def __init__(self, plan: List[Tuple[int, int]], initializers: List[Initializer], outputs: int = 10):\n super(CIFAR_ResNet, self).__init__()\n outputs = outputs or 10\n\n self.num_classes = outputs\n\n # Initial convolution.\n current_filters = plan[0][0]\n self.conv = nn.Conv2d(3, current_filters, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn = nn.BatchNorm2d(current_filters)\n self.relu = nn.ReLU(inplace=True)\n\n # The subsequent blocks of the ResNet.\n blocks = []\n for segment_index, (filters, num_blocks) in enumerate(plan):\n for block_index in range(num_blocks):\n downsample = segment_index > 0 and block_index == 0\n blocks.append(CIFAR_ResNet.Block(current_filters, filters, downsample))\n current_filters = filters\n\n self.blocks = nn.Sequential(*blocks)\n\n # Final fc layer. Size = number of filters in last segment.\n self.fc = nn.Linear(plan[-1][0], outputs)\n self.criterion = nn.CrossEntropyLoss()\n\n for initializer in initializers:\n initializer = Initializer(initializer)\n self.apply(initializer.get_initializer())\n\n def forward(self, x: torch.Tensor):\n out = self.relu(self.bn(self.conv(x)))\n out = self.blocks(out)\n out = F.avg_pool2d(out, out.size()[3])\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n return out\n\n @staticmethod\n def is_valid_model_name(model_name: str):\n valid_model_names = [f\"cifar_resnet_{layers}\" for layers in (20, 56)]\n return (model_name in valid_model_names)\n\n @staticmethod\n def get_model_from_name(model_name: str, initializers: List[Initializer], outputs: int = 10):\n \"\"\"The naming scheme for a ResNet is ``'cifar_resnet_D[_W]'``.\n\n D is the model depth (e.g. 
``'cifar_resnet_56'``)\n \"\"\"\n\n if not CIFAR_ResNet.is_valid_model_name(model_name):\n raise ValueError('Invalid model name: {}'.format(model_name))\n\n depth = int(model_name.split('_')[2])\n if len(model_name.split('_')) == 3:\n width = 16\n else:\n width = int(model_name.split('_')[3])\n\n if (depth - 2) % 3 != 0:\n raise ValueError('Invalid CIFAR_ResNet depth: {}'.format(depth))\n num_blocks = (depth - 2) // 6\n\n model_arch = {\n 56: [(width, num_blocks), (2 * width, num_blocks), (4 * width, num_blocks)],\n 20: [(width, num_blocks), (2 * width, num_blocks), (4 * width, num_blocks)],\n }\n\n return CIFAR_ResNet(model_arch[depth], initializers, outputs)\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.CrossEntropyLoss",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.ReLU"
]
] |
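
The plan passed to CIFAR_ResNet is fully determined by the depth/width arithmetic in get_model_from_name. A short sketch of that mapping (the helper name is illustrative; the arithmetic and the width default of 16 come from the code above):

```python
# Mirrors CIFAR_ResNet.get_model_from_name: depth D gives (D - 2) // 6 blocks
# per segment, with filter counts width, 2*width, 4*width across the 3 segments.
def plan_from_name(model_name: str, width: int = 16):
    depth = int(model_name.split("_")[2])
    num_blocks = (depth - 2) // 6
    return [(width, num_blocks), (2 * width, num_blocks), (4 * width, num_blocks)]

print(plan_from_name("cifar_resnet_20"))  # [(16, 3), (32, 3), (64, 3)]
print(plan_from_name("cifar_resnet_56"))  # [(16, 9), (32, 9), (64, 9)]
```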
evgeniya-egupova/openvino | [
"a9a583eb42d43322b39b95b164b5b22c4f341111"
] | [
"src/bindings/python/tests/test_ngraph/test_core.py"
] | [
"# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nimport openvino.runtime.opset8 as ov\nfrom openvino.runtime.impl import Dimension, Function, PartialShape, Shape\n\n\ndef test_dimension():\n dim = Dimension()\n assert dim.is_dynamic\n assert not dim.is_static\n assert repr(dim) == \"<Dimension: ?>\"\n\n dim = Dimension.dynamic()\n assert dim.is_dynamic\n assert not dim.is_static\n assert repr(dim) == \"<Dimension: ?>\"\n\n dim = Dimension(10)\n assert dim.is_static\n assert len(dim) == 10\n assert dim.get_length() == 10\n assert dim.get_min_length() == 10\n assert dim.get_max_length() == 10\n assert repr(dim) == \"<Dimension: 10>\"\n\n dim = Dimension(5, 15)\n assert dim.is_dynamic\n assert dim.get_min_length() == 5\n assert dim.get_max_length() == 15\n assert repr(dim) == \"<Dimension: [5, 15]>\"\n\n\ndef test_dimension_comparisons():\n d1 = Dimension.dynamic()\n d2 = Dimension.dynamic()\n assert d1 == d2\n assert d1 == -1\n assert d1.refines(d2)\n assert d1.relaxes(d2)\n assert d2.refines(d1)\n assert d2.relaxes(d1)\n assert d2.compatible(d1)\n assert d2.same_scheme(d1)\n\n d1 = Dimension.dynamic()\n d2 = Dimension(3)\n assert d1 != d2\n assert d2 == 3\n assert not d1.refines(d2)\n assert d1.relaxes(d2)\n assert d2.refines(d1)\n assert not d2.relaxes(d1)\n assert d2.compatible(d1)\n assert not d2.same_scheme(d1)\n\n d1 = Dimension(3)\n d2 = Dimension(3)\n assert d1 == d2\n assert d1.refines(d2)\n assert d1.relaxes(d2)\n assert d2.refines(d1)\n assert d2.relaxes(d1)\n assert d2.compatible(d1)\n assert d2.same_scheme(d1)\n\n d1 = Dimension(4)\n d2 = Dimension(3)\n assert d1 != d2\n assert not d1.refines(d2)\n assert not d1.relaxes(d2)\n assert not d2.refines(d1)\n assert not d2.relaxes(d1)\n assert not d2.compatible(d1)\n assert not d2.same_scheme(d1)\n\n\ndef test_partial_shape():\n ps = PartialShape([1, 2, 3, 4])\n assert ps.is_static\n assert not ps.is_dynamic\n assert ps.rank == 4\n assert repr(ps) == \"<PartialShape: {1,2,3,4}>\"\n assert ps.get_dimension(0) == Dimension(1)\n assert ps.get_dimension(1) == Dimension(2)\n assert ps.get_dimension(2) == Dimension(3)\n assert ps.get_dimension(3) == Dimension(4)\n\n shape = Shape([1, 2, 3])\n ps = PartialShape(shape)\n assert ps.is_static\n assert not ps.is_dynamic\n assert ps.all_non_negative\n assert ps.rank == 3\n assert list(ps.get_shape()) == [1, 2, 3]\n assert list(ps.get_max_shape()) == [1, 2, 3]\n assert list(ps.get_min_shape()) == [1, 2, 3]\n assert list(ps.to_shape()) == [1, 2, 3]\n assert repr(shape) == \"<Shape: {1, 2, 3}>\"\n assert repr(ps) == \"<PartialShape: {1,2,3}>\"\n\n ps = PartialShape([Dimension(1), Dimension(2), Dimension(3), Dimension.dynamic()])\n assert not ps.is_static\n assert ps.is_dynamic\n assert ps.all_non_negative\n assert ps.rank == 4\n assert list(ps.get_min_shape()) == [1, 2, 3, 0]\n assert list(ps.get_max_shape())[3] > 1000000000\n assert repr(ps) == \"<PartialShape: {1,2,3,?}>\"\n assert ps.get_dimension(0) == Dimension(1)\n assert ps.get_dimension(1) == Dimension(2)\n assert ps.get_dimension(2) == Dimension(3)\n assert ps.get_dimension(3) == Dimension.dynamic()\n\n ps = PartialShape([1, 2, 3, -1])\n assert not ps.is_static\n assert ps.is_dynamic\n assert ps.all_non_negative\n assert ps.rank == 4\n assert list(ps.get_min_shape()) == [1, 2, 3, 0]\n assert list(ps.get_max_shape())[3] > 1000000000\n assert repr(ps) == \"<PartialShape: {1,2,3,?}>\"\n\n ps = PartialShape.dynamic()\n assert not ps.is_static\n assert ps.is_dynamic\n assert ps.rank == 
Dimension.dynamic()\n assert list(ps.get_min_shape()) == []\n assert list(ps.get_max_shape()) == []\n assert repr(ps) == \"<PartialShape: ?>\"\n\n ps = PartialShape.dynamic(r=Dimension(2))\n assert not ps.is_static\n assert ps.is_dynamic\n assert ps.rank == 2\n assert 2 == ps.rank\n assert list(ps.get_min_shape()) == [0, 0]\n assert list(ps.get_max_shape())[0] > 1000000000\n assert repr(ps) == \"<PartialShape: {?,?}>\"\n\n\ndef test_partial_shape_compatible():\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape.dynamic()\n assert ps1.compatible(ps2)\n\n ps1 = PartialShape([3])\n ps2 = PartialShape.dynamic()\n assert ps1.compatible(ps2)\n\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape([4])\n assert ps1.compatible(ps2)\n\n ps1 = PartialShape([2, -1, 3, -1, 5])\n ps2 = PartialShape([2, -1, -1, 4, 5])\n assert ps1.compatible(ps2)\n\n ps1 = PartialShape([2, -1, 3, -1, 5])\n ps2 = PartialShape([1, -1, -1, 4, 5])\n assert not ps1.compatible(ps2)\n\n\ndef test_partial_shape_same_scheme():\n ps1 = PartialShape([1, 2, -1])\n ps2 = PartialShape([1, 3, -1])\n assert not ps1.same_scheme(ps2)\n\n ps1 = PartialShape([1, 2, -1])\n ps2 = PartialShape([1, 2, -1])\n assert ps1.same_scheme(ps2)\n\n ps1 = PartialShape([1, 2, 3])\n ps2 = PartialShape([1, 2, 3])\n assert ps1.same_scheme(ps2)\n\n ps1 = PartialShape([-1, 2, 3])\n ps2 = PartialShape([1, -1, 3])\n assert not ps1.same_scheme(ps2)\n\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape.dynamic()\n assert ps1.same_scheme(ps2)\n\n\ndef test_partial_shape_refinement():\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape.dynamic()\n assert ps1.refines(ps2)\n assert ps1.relaxes(ps2)\n assert ps2.refines(ps1)\n assert ps2.relaxes(ps1)\n\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape([3, -1, 7, 9])\n assert not ps1.refines(ps2)\n assert ps1.relaxes(ps2)\n assert ps2.refines(ps1)\n assert not ps2.relaxes(ps1)\n\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape([3, 5, 7, 9])\n assert not ps1.refines(ps2)\n assert ps1.relaxes(ps2)\n assert ps2.refines(ps1)\n assert not ps2.relaxes(ps1)\n\n\ndef test_partial_shape_equals():\n ps1 = PartialShape.dynamic()\n ps2 = PartialShape.dynamic()\n assert ps1 == ps2\n\n ps1 = PartialShape([1, 2, 3])\n ps2 = PartialShape([1, 2, 3])\n assert ps1 == ps2\n\n shape = Shape([1, 2, 3])\n ps = PartialShape([1, 2, 3])\n assert shape == ps\n\n\ndef test_repr_dynamic_shape():\n shape = PartialShape([-1, 2])\n parameter_a = ov.parameter(shape, dtype=np.float32, name=\"A\")\n parameter_b = ov.parameter(shape, dtype=np.float32, name=\"B\")\n model = parameter_a + parameter_b\n function = Function(model, [parameter_a, parameter_b], \"simple_dyn_shapes_graph\")\n\n assert repr(function) == \"<Function: 'simple_dyn_shapes_graph' ({?,2})>\"\n\n ops = function.get_ordered_ops()\n for op in ops:\n assert \"{?,2}\" in repr(op)\n\n\ndef test_discrete_type_info():\n data_shape = [6, 12, 10, 24]\n data_parameter = ov.parameter(data_shape, name=\"Data\", dtype=np.float32)\n k = np.int32(3)\n axis = np.int32(1)\n n1 = ov.topk(data_parameter, k, axis, \"max\", \"value\")\n n2 = ov.topk(data_parameter, k, axis, \"max\", \"value\")\n n3 = ov.sin(0.2)\n\n assert n1.type_info.name == \"TopK\"\n assert n3.type_info.name == \"Sin\"\n assert n1.get_type_info().name == \"TopK\"\n assert n3.get_type_info().name == \"Sin\"\n assert n1.type_info.name == n2.type_info.name\n assert n1.type_info.version == n2.type_info.version\n assert n1.type_info.parent == n2.type_info.parent\n assert n1.get_type_info().name == n2.get_type_info().name\n assert 
n1.get_type_info().version == n2.get_type_info().version\n assert n1.get_type_info().parent == n2.get_type_info().parent\n assert n1.get_type_info().name != n3.get_type_info().name\n assert n1.get_type_info().name > n3.get_type_info().name\n assert n1.get_type_info().name >= n3.get_type_info().name\n assert n3.get_type_info().name < n1.get_type_info().name\n assert n3.get_type_info().name <= n1.get_type_info().name\n"
] | [
[
"numpy.int32"
]
] |
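
A condensed usage sketch of the Dimension/PartialShape behavior these tests assert, using the same imports as the test module:

```python
from openvino.runtime.impl import Dimension, PartialShape

dim = Dimension(5, 15)                 # interval (dynamic) dimension
assert dim.is_dynamic
assert dim.get_min_length() == 5 and dim.get_max_length() == 15

ps = PartialShape([1, 2, 3, -1])       # -1 marks a dynamic dimension
assert ps.is_dynamic
assert list(ps.get_min_shape()) == [1, 2, 3, 0]
```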
majkee15/HiddenMarkovJumpProcess-RLEnvironment | [
"730ef636bfa51f6137268ab7760f9a504ba583db"
] | [
"control/base.py"
] | [
"import os\n\nimport logging\nimport datetime\n\n\nimport numpy as np\nimport tensorflow as tf\nfrom gym.spaces import Box, Discrete\nfrom gym.utils import colorize\n\nfrom control.utils.misc import Config\nfrom control.utils.misc import REPO_ROOT, RESOURCE_ROOT\n\nfrom abc import ABC, abstractmethod\n\n\n\nclass TrainConfigBase(Config):\n lr = 0.001\n n_steps = 10000\n warmup_steps = 5000\n batch_size = 64\n log_every_step = 1000\n\n # give an extra bonus if done; only needed for certain tasks.\n done_reward = None\n\n\nclass Policy(ABC):\n\n def __init__(self, env, name, training=True, deterministic=False):\n self.env = env\n\n self.training = training\n self.name = self.__class__.__name__ + '--' + name\n\n if deterministic:\n np.random.seed(1)\n\n # Logger\n self.logger = logging.getLogger(name)\n logging.basicConfig()\n self.logger.setLevel(os.getenv('LOG_LEVEL', 'INFO'))\n # self.logger.info('Instantiated class ' + self.__class__.__name__)\n\n @property\n def act_size(self):\n # number of options of an action; this only makes sense for discrete actions.\n if isinstance(self.env.action_space, Discrete):\n return self.env.action_space.n\n else:\n return None\n\n @property\n def act_dim(self):\n # dimension of an action; this only makes sense for continuous actions.\n if isinstance(self.env.action_space, Box):\n return list(self.env.action_space.shape)\n else:\n return []\n\n @property\n def state_dim(self):\n # dimension of a state.\n return list(self.env.observation_space.shape)\n\n @staticmethod\n def obs_to_inputs(self, ob):\n return ob.flatten()\n\n @abstractmethod\n def get_action(self, state, **kwargs):\n pass\n\n @abstractmethod\n def build(self):\n pass\n\n @abstractmethod\n def train(self, *args, **kwargs):\n pass\n\n\n def evaluate(self, n_episodes):\n # TODO: evaluate uses default setting of the environment, i.g., random start\n # this should be done in parallel\n # and it should be depending on a starting state!\n reward_history = []\n\n for i in range(n_episodes):\n ob = self.env.reset()\n done = False\n reward = 0.\n while not done:\n a, q = self.get_action(ob, epsilon=0.0)\n new_ob, r, done, _ = self.env.step(a)\n # self.env.render()\n reward += r\n ob = new_ob\n\n reward_history.append(reward)\n\n #print(\"Avg. reward over {} episodes: {:.4f}\".format(n_episodes, np.mean(reward_history)))\n self.logger.info(\"Avg. reward over {} episodes: {:.4f}\".format(n_episodes, np.mean(reward_history)))\n return reward_history\n\n\nclass BaseModelMixin(ABC):\n\n def __init__(self, model_name, experiment_name=None):\n self._saver = None\n self._writer = None\n self._experiment_name = experiment_name\n self.model_name = model_name\n self.current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n def _get_dir(self, dir_name):\n if self._experiment_name is not None:\n path = os.path.join(RESOURCE_ROOT, dir_name, self._experiment_name, self.model_name, self.current_time)\n else:\n path = os.path.join(RESOURCE_ROOT, dir_name, self.model_name, self.current_time)\n os.makedirs(path, exist_ok=True)\n return path\n\n @property\n def log_dir(self):\n return self._get_dir('training_logs')\n\n @property\n def checkpoint_dir(self):\n return self._get_dir('checkpoints')\n\n @property\n def model_dir(self):\n return self._get_dir('models')\n\n @property\n def tb_dir(self):\n # tensorboard\n return self._get_dir('tb_logs')\n\n @property\n def writer(self):\n if self._writer is None:\n self._writer = tf.summary.create_file_writer(self.tb_dir)\n return self._writer\n"
] | [
[
"tensorflow.summary.create_file_writer",
"numpy.random.seed",
"numpy.mean"
]
] |
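
The Policy base class above only fixes the build/train/get_action interface that evaluate() drives. A hypothetical minimal subclass is sketched below (random actions with a dummy q-value; it assumes the pre-0.26 Gym reset/step API the base class expects, and the control.base module path from the row's file layout):

```python
import gym
from control.base import Policy  # module path as in the repo layout above

class RandomPolicy(Policy):
    def build(self):
        pass

    def train(self, *args, **kwargs):
        pass

    def get_action(self, state, **kwargs):
        # evaluate() unpacks (action, q); return a dummy q-value of 0.0.
        return self.env.action_space.sample(), 0.0

policy = RandomPolicy(gym.make("CartPole-v1"), name="baseline", training=False)
rewards = policy.evaluate(n_episodes=5)
```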
tenpercent/pytorch | [
"7f996b855c5070ab4a6bea0f451c8a22c0ce2394"
] | [
"test/test_ops_jit.py"
] | [
"# Owner(s): [\"module: unknown\"]\n\nfrom functools import partial\n\nimport torch\n\nfrom torch.testing import FileCheck\nfrom torch.testing._internal.common_utils import \\\n (run_tests, IS_SANDCASTLE, clone_input_helper, first_sample)\nfrom torch.testing._internal.common_methods_invocations import op_db\nfrom torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes\nfrom torch.testing._internal.common_jit import JitCommonTestCase, check_against_reference\nfrom torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, check_alias_annotation\nfrom torch.testing._internal.jit_utils import disable_autodiff_subgraph_inlining, is_lambda\n\n\n# TODO: fixme https://github.com/pytorch/pytorch/issues/68972\ntorch.set_default_dtype(torch.float32)\n\n# variant testing is only done with torch.float and torch.cfloat to avoid\n# excessive test times and maximize signal to noise ratio\n_variant_ops = partial(ops, dtypes=OpDTypes.supported,\n allowed_dtypes=(torch.float, torch.cfloat))\n\n\n\n# Tests operators for consistency between JIT and eager, also checks\n# correctness of JIT specific alias schemas and intended\n# autodifferentiation behavior.\n# Inherits from JitCommonTestCase instead of TestCase directly to share\n# functionality with original test_jit.py method operator tests\nclass TestJit(JitCommonTestCase):\n exact_dtype = True\n\n # Tests that the forward and backward passes of operations produce the\n # same values for the cross-product of op variants (function, method, inplace)\n # and runtimes (eager, traced, scripted).\n # TODO WARNING: inplace x {traced, scripted} not currently tested\n @_variant_ops(op_db)\n def test_variant_consistency_jit(self, device, dtype, op):\n _requires_grad = op.supports_autograd and (dtype.is_floating_point or\n op.supports_complex_autograd(torch.device(device).type))\n\n include_conjugated_inputs = op.test_conjugated_samples and dtype.is_complex\n samples = op.sample_inputs(device, dtype, requires_grad=_requires_grad, include_conjugated_inputs=include_conjugated_inputs)\n\n # Acquires variants to test\n func = op.get_op()\n method = op.get_method()\n variants = {\n # TODO: inplace tests currently fail, fix and add inplace variant\n 'function': func, 'method': method,\n }\n\n # TODO: find better way to standardize on op registration itself..\n has_fake_function = op.name in [\"resize_\", 'resize_as_']\n\n if has_fake_function:\n variants = {'method': getattr(torch.Tensor, op.name)}\n samples = op.sample_inputs(device, dtype, requires_grad=False)\n\n support_script = op.supports_scripting\n\n tested = False\n for sample in samples:\n # Test traced and scripted consistency\n for func_type, variant in variants.items():\n if variant is None:\n continue\n\n # scripting and check_alias_analysis do not work with lambdas\n # lambdas are typically used as a way to simulate methods without\n # functional variants, so rely on the other variant for testing\n # for now\n if is_lambda(variant):\n continue\n\n tested = True\n\n # Create accessor for script function variant\n name = op.name + '_' if func_type == 'inplace' else op.name\n\n # run with disable_autodiff_subgraph_inlining(True) to test\n # autodiff support. 
Context manager forces the graph to contain\n # DifferentiableGraph nodes if they are present\n with disable_autodiff_subgraph_inlining():\n # Check scripted forward, grad, and grad grad\n if support_script:\n script_fn = create_script_fn(self, name, func_type)\n\n def out_fn(output):\n # Processes the output for autograd\n if sample.output_process_fn_grad is not None:\n return sample.output_process_fn_grad(output)\n return output\n\n def get_sample():\n return clone_input_helper(sample.input) if op.name[-1] == '_' else sample.input\n\n if support_script:\n check_against_reference(self,\n script_fn,\n func,\n out_fn,\n (get_sample(),) + sample.args,\n sample.kwargs,\n no_grad=not _requires_grad, no_gradgrad=not op.supports_gradgrad)\n\n # Check traced forward, grad, and grad grad\n # TODO: fix tracing here\n supports_tracing = not has_fake_function\n if op.assert_jit_shape_analysis:\n self.assertTrue(supports_tracing)\n\n if supports_tracing:\n traced_fn = create_traced_fn(self, variant)\n check_against_reference(self,\n traced_fn,\n func,\n out_fn,\n (get_sample(),) + sample.args,\n sample.kwargs,\n no_grad=not _requires_grad, no_gradgrad=not op.supports_gradgrad)\n\n # Check alias annotation schema for correctness (make\n # sure inputs that aren't supposed to be modified aren't)\n # Note: only runs in float32 because schema isn't affected by dtype,\n # so running it on all dtypes is would be excessive\n if dtype == torch.float32:\n # TODO: no reason why we cant run this with tracing graph\n if support_script and op.name != \"rsub\":\n check_alias_annotation(name, (get_sample(),) + sample.args, sample.kwargs,\n func_type=func_type, aten_name=op.aten_name)\n\n # TODO: use script graph as well\n checked_shape_analysis = False\n if supports_tracing:\n out = variant(get_sample(), *sample.args, **sample.kwargs)\n\n # right now, tuple of outputs and tensor output supported\n # TODO: list of tensor outputs\n tuple_of_tensors = isinstance(out, tuple) and all([isinstance(elem, torch.Tensor) for elem in out])\n\n if isinstance(out, torch.Tensor) or tuple_of_tensors:\n if tuple_of_tensors:\n sizes = [elem.size() for elem in out]\n else:\n sizes = out.size()\n self.checkShapeAnalysis(sizes, traced_fn.graph, op.assert_jit_shape_analysis)\n checked_shape_analysis = True\n if op.assert_jit_shape_analysis:\n self.assertTrue(checked_shape_analysis)\n\n # Check autodifferentiation of nodes for traced and scripted graphs, only need to check once per sample\n if dtype is torch.float32:\n # Sandcastle doesn't fuse nodes\n if IS_SANDCASTLE:\n # fusible nodes are expected to be found in FusionGroups in the DifferentiableGraphs\n nonfusible_nodes = op.autodiff_nonfusible_nodes + op.autodiff_fusible_nodes\n fusible_nodes = []\n else:\n nonfusible_nodes = op.autodiff_nonfusible_nodes\n fusible_nodes = op.autodiff_fusible_nodes\n\n if supports_tracing:\n self.assertAutodiffNode(traced_fn.last_graph, op.assert_autodiffed, nonfusible_nodes, fusible_nodes)\n if support_script:\n self.assertAutodiffNode(script_fn.last_graph, op.assert_autodiffed, nonfusible_nodes, fusible_nodes)\n assert tested, \"JIT Test does not execute any logic\"\n\n # alias testing is only done with torch.float for the same reason\n _alias_ops = partial(ops, dtypes=OpDTypes.supported,\n allowed_dtypes=(torch.float,))\n\n @_alias_ops((op for op in op_db if op.aliases))\n def test_jit_alias_remapping(self, device, dtype, op):\n # Required to avoid undefined value: tensor error in JIT compilation of the function template\n tensor = torch.tensor\n\n 
# NOTE: only tests on first sample\n samples = op.sample_inputs(device, dtype, requires_grad=True)\n sample = first_sample(self, samples)\n\n # [Scripting Data Preparation]\n # Prepare data for test scripting\n # Below we prepare strings of args/kwargs with and without type annotations.\n # These strings are inserted into function template strings which is then torch scripted.\n # - args string is [\"t0\"] corresponding to the \"input\" tensor required by the op\n # - args_kw is the value of args and strings of kwargs used to call the op (without type annotations), for example,\n # [\"to\", \"1.0\", \"(1,)\", \"True\", \"tensor(1.0)\"] -> def fn(t0): return variant(t0, 1.0, (1,), True, tensor(1.0))\n args = [\"t0\"]\n\n def quote_strs(v):\n if isinstance(v, str):\n return f\"'{v}'\"\n\n return str(v)\n\n args_kw = args + \\\n [f\"{v}\" for v in sample.args] + \\\n [f\"{k}={quote_strs(v)}\" for k, v in sample.kwargs.items()]\n\n # Prepare data for test tracing\n sample_args_kwargs = ()\n if len(sample.args) > 0:\n sample_args_kwargs += (sample.args, )\n if len(sample.kwargs) > 0:\n sample_args_kwargs += (sample.kwargs, )\n\n original_name = op.aten_name\n original_name_inplace = original_name + \"_\"\n expected_dtype = op(sample.input, *sample.args, **sample.kwargs).dtype\n\n for a_op in op.aliases:\n inplace = a_op.inplace_variant\n method_or_inplace = [a_op.inplace_variant, a_op.method_variant]\n variants = (v for v in (a_op.op, a_op.method_variant, a_op.inplace_variant) if v is not None)\n\n # Test scripting:\n for variant in variants:\n variant_name = variant.__name__\n op_name = original_name_inplace if variant is inplace else original_name\n\n if variant in method_or_inplace:\n fn_template = '''\n def _fn(t0{c}):\n return t0.{alias_name}({args_kw})\n '''\n # remove the first input tensor\n script = fn_template.format(\n c=\", \" if len(args_kw[1:]) > 1 else \"\",\n args_kw=\", \".join(args_kw[1:]),\n alias_name=variant_name,\n )\n else:\n fn_template = '''\n def _fn({args}):\n return variant({args_kw})\n '''\n script = fn_template.format(\n args=\", \".join(args),\n args_kw=\", \".join(args_kw),\n )\n scripted = torch.jit.CompilationUnit(script)._fn\n\n if (variant is inplace and not torch.can_cast(expected_dtype, dtype)):\n try:\n inp = clone_input_helper(sample.input)\n scripted(inp)\n except Exception as e:\n continue\n self.fail(\"Inplace operation on integer tensor that should be promoted to float didn't fail!\")\n\n inp = clone_input_helper(sample.input)\n scripted(inp)\n inp = clone_input_helper(sample.input)\n graph = scripted.graph_for(inp)\n FileCheck().check(op.aten_name).check_not(variant_name).run(graph)\n\n # Test tracing:\n for variant in variants:\n variant_name = variant.__name__\n op_name = original_name_inplace if variant is inplace else original_name\n\n def _fn(*sample_args, **sample_kwargs):\n return variant(*sample_args, **sample_kwargs)\n\n inp = (clone_input_helper(sample.input),) + sample_args_kwargs\n traced = torch.jit.trace(_fn, *inp)\n inp = (clone_input_helper(sample.input),) + sample_args_kwargs\n traced(*inp)\n inp = (clone_input_helper(sample.input),) + sample_args_kwargs\n graph = traced.graph_for(*inp)\n FileCheck().check(op_name).check_not(variant_name).run(graph)\n\n\ninstantiate_device_type_tests(TestJit, globals())\n\nif __name__ == '__main__':\n run_tests()\n"
] | [
[
"torch.testing._internal.jit_metaprogramming_utils.create_script_fn",
"torch.testing._internal.common_utils.clone_input_helper",
"torch.testing._internal.common_utils.run_tests",
"torch.jit.CompilationUnit",
"torch.can_cast",
"torch.testing._internal.jit_utils.is_lambda",
"torch.set_default_dtype",
"torch.testing._internal.jit_metaprogramming_utils.create_traced_fn",
"torch.testing._internal.jit_utils.disable_autodiff_subgraph_inlining",
"torch.testing.FileCheck",
"torch.device",
"torch.testing._internal.common_utils.first_sample",
"torch.jit.trace"
]
] |
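
A stripped-down version of the eager/scripted/traced consistency check that test_variant_consistency_jit performs per operator, applied to a single hand-written function:

```python
import torch

def fn(x):
    return torch.sin(x) + x

scripted = torch.jit.script(fn)               # scripted runtime
traced = torch.jit.trace(fn, torch.randn(3))  # traced runtime

x = torch.randn(3)
assert torch.allclose(fn(x), scripted(x))     # eager vs. scripted
assert torch.allclose(fn(x), traced(x))       # eager vs. traced
```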
AlexPC23/Python | [
"77689d74c5444faa1aa253a122602307e52ac581"
] | [
"Spyder/Ejercicios/Comparacion medias.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 6 19:54:52 2021\n\n@author: Alex\n\"\"\"\n\nimport os #sistema operativo\nimport pandas as pd #gestionar datframes\nimport numpy as np #numeric python (vectores, matrices,...)\nimport matplotlib.pyplot as plt #graficos\nimport scipy.stats as stats #Tests estadisticos\nimport seaborn as sns #Graficos pro\n\nos.chdir('C:/Programacion Estadistica PEP/ejercicio comparacion medias')\nos.getcwd()\nwbr = pd.read_csv('USA_cars_datasets.csv', sep=',', decimal='.')\n#Media del precio de los coches\nwbr.price.describe()\nplt.hist(wbr.price)\nplt.xlabel('Price')\nplt.ylabel('Frequency')\nprops = dict(boxstyle= 'round', facecolor='white', lw=0.5)\nplt.text(55000,550,'Mean:18767.67''\\n''n:2499' '\\n' 'std:12116.09', bbox=props)\nplt.title('Number of cars sold by price ''\\n')\nplt.show()\n\n\nwbr.mileage.describe()\n#Kilometraje coches\nwbr.loc [(wbr['mileage']<50000), \"mileage_cat2\"] = \"1: Poco kilometraje\"\nwbr.loc [(wbr['mileage']>=50000) & (wbr['mileage']<150000), \"mileage_cat2\"] = \"2: Kilometraje normal\"\nwbr.loc [(wbr['mileage']>=150000), \"mileage_cat2\"] = \"3: Alto kilometraje\"\n\nmytable = pd.crosstab(index=wbr[\"mileage_cat2\"], columns=\"count\")\nn=mytable.sum()\nmytable2 = (mytable/n)*100\nplt.bar(mytable2.index, mytable2['count'])\n\n#2º Hacer el test\n#Comparacion descriptiva:\nwbr.groupby('mileage_cat2').price.mean()\n\n#Comparacion estadistica:\n#Extraer las muestras y guardalas en objetos:\nprice_pocoskm=wbr.loc [(wbr['mileage']<50000), \"price\"] \nprice_normalkm=wbr.loc [(wbr['mileage']>=50000) & (wbr['mileage']<150000), \"price\"] \nprice_muchoskm=wbr.loc [(wbr['mileage']>=150000), \"price\"]\n\n#Hacer F DE FISHER de las medias para comparar\nres = stats.f_oneway(price_pocoskm, price_normalkm, price_muchoskm)\n#pvalue= 5.077309184346995e-110\nprint(res)\nprint('F:', round(res[0],3), 'PValue:', round(res[1],3))\n\n\n#COMPARACION GRAFICA: intervalos de confianza para las medias\n\nplt.figure(figsize=(7,5))\nax = sns.pointplot(x=\"mileage_cat2\", y=\"price\", data=wbr, capsize=0.05, ci=95, join=0)\nax.set_ylabel('')\n\nplt.axhline(y=wbr.price.mean(), linewidth=1, linestyle= 'dashed', color=\"green\")\nprops = dict(boxstyle='round', facecolor='white', lw=0.5)\nplt.text(1.5, 5000, 'Mean:18767.67''\\n''n:2499' '\\n' 'F: 278.83''\\n' 'Pval.: 0.000', bbox=props)\nplt.xlabel('Kilometraje')\nplt.title('Average rentals by mileage''\\n')\n\n\n\n"
] | [
[
"pandas.read_csv",
"pandas.crosstab",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.text",
"scipy.stats.f_oneway",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.bar"
]
] |
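
A self-contained sketch of the same one-way ANOVA comparison, with synthetic prices standing in for the three USA_cars_datasets.csv mileage groups (group means and sizes here are invented):

```python
import numpy as np
import scipy.stats as stats

rng = np.random.default_rng(42)
low_km = rng.normal(25000, 5000, 300)    # hypothetical price samples per group
mid_km = rng.normal(18000, 5000, 300)
high_km = rng.normal(12000, 5000, 300)

f_stat, p_value = stats.f_oneway(low_km, mid_km, high_km)
print('F:', round(f_stat, 3), 'PValue:', round(p_value, 3))  # small p -> group means differ
```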
happog/PaddleOCR | [
"5ed1e2427b4e1759f0e9278f453e8d497db33b59"
] | [
"deploy/pdserving/ocr_local_server.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom paddle_serving_client import Client\nfrom paddle_serving_app.reader import OCRReader\nimport cv2\nimport sys\nimport numpy as np\nimport os\nfrom paddle_serving_client import Client\nfrom paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor\nfrom paddle_serving_app.reader import Div, Normalize, Transpose\nfrom paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes\nif sys.argv[1] == 'gpu':\n from paddle_serving_server_gpu.web_service import WebService\nelif sys.argv[1] == 'cpu':\n from paddle_serving_server.web_service import WebService\nfrom paddle_serving_app.local_predict import Debugger\nimport time\nimport re\nimport base64\n\n\nclass OCRService(WebService):\n def init_det_debugger(self, det_model_config):\n self.det_preprocess = Sequential([\n ResizeByFactor(32, 960), Div(255),\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(\n (2, 0, 1))\n ])\n self.det_client = Debugger()\n if sys.argv[1] == 'gpu':\n self.det_client.load_model_config(\n det_model_config, gpu=True, profile=False)\n elif sys.argv[1] == 'cpu':\n self.det_client.load_model_config(\n det_model_config, gpu=False, profile=False)\n self.ocr_reader = OCRReader()\n\n def preprocess(self, feed=[], fetch=[]):\n data = base64.b64decode(feed[0][\"image\"].encode('utf8'))\n data = np.fromstring(data, np.uint8)\n im = cv2.imdecode(data, cv2.IMREAD_COLOR)\n ori_h, ori_w, _ = im.shape\n det_img = self.det_preprocess(im)\n _, new_h, new_w = det_img.shape\n det_img = det_img[np.newaxis, :]\n det_img = det_img.copy()\n det_out = self.det_client.predict(\n feed={\"image\": det_img}, fetch=[\"concat_1.tmp_0\"])\n filter_func = FilterBoxes(10, 10)\n post_func = DBPostProcess({\n \"thresh\": 0.3,\n \"box_thresh\": 0.5,\n \"max_candidates\": 1000,\n \"unclip_ratio\": 1.5,\n \"min_size\": 3\n })\n sorted_boxes = SortedBoxes()\n ratio_list = [float(new_h) / ori_h, float(new_w) / ori_w]\n dt_boxes_list = post_func(det_out[\"concat_1.tmp_0\"], [ratio_list])\n dt_boxes = filter_func(dt_boxes_list[0], [ori_h, ori_w])\n dt_boxes = sorted_boxes(dt_boxes)\n get_rotate_crop_image = GetRotateCropImage()\n img_list = []\n max_wh_ratio = 0\n for i, dtbox in enumerate(dt_boxes):\n boximg = get_rotate_crop_image(im, dt_boxes[i])\n img_list.append(boximg)\n h, w = boximg.shape[0:2]\n wh_ratio = w * 1.0 / h\n max_wh_ratio = max(max_wh_ratio, wh_ratio)\n if len(img_list) == 0:\n return [], []\n _, w, h = self.ocr_reader.resize_norm_img(img_list[0],\n max_wh_ratio).shape\n imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')\n for id, img in enumerate(img_list):\n norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)\n imgs[id] = norm_img\n feed = {\"image\": imgs.copy()}\n fetch = [\"ctc_greedy_decoder_0.tmp_0\", \"softmax_0.tmp_0\"]\n return feed, fetch\n\n def postprocess(self, feed={}, fetch=[], fetch_map=None):\n rec_res = 
self.ocr_reader.postprocess(fetch_map, with_score=True)\n res_lst = []\n for res in rec_res:\n res_lst.append(res[0])\n res = {\"res\": res_lst}\n return res\n\n\nocr_service = OCRService(name=\"ocr\")\nocr_service.load_model_config(\"ocr_rec_model\")\nocr_service.prepare_server(workdir=\"workdir\", port=9292)\nocr_service.init_det_debugger(det_model_config=\"ocr_det_model\")\nif sys.argv[1] == 'gpu':\n ocr_service.run_debugger_service(gpu=True)\nelif sys.argv[1] == 'cpu':\n ocr_service.run_debugger_service()\nocr_service.run_web_service()\n"
] | [
[
"numpy.fromstring"
]
] |
PMARINA/COS-429 | [
"25134e77101279c3f9f16a6738beb6170ba1fd09"
] | [
"Assignment 0/Part 2 - Getting Familiar with Python/SettingPixels.py"
] | [
"import numpy as np\nimport cv2\nimport os\nwindow_title = \"The Input Image\"\ninput_image = \"input.jpg\"\noutput_image = os.path.basename(__file__)[:-len(\".py\")] + \".jpg\"\nHORIZONTAL = 0\nVERTICAL = 1\n\ndef read_image(file_name = input_image):\n img = cv2.imread(file_name)\n return img\n\ndef display_image(img,window_title = window_title):\n cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)\n cv2.imshow(window_title,img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n return\n\ndef grayscale(img):\n grayscale = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n #=6, BGR and not RGB because of how cv2 returns images\n return grayscale\n\ndef save_to_disk(img,filename=output_image):\n cv2.imwrite(filename,img)\n\ndef get_dimensions_hw(img):\n return img.shape[0:2]\n\ndef get_middle_pixels_hw(img, new_height, new_width):\n input_img_h,input_img_w = get_dimensions_hw(img)\n if new_height > input_img_h:\n raise ValueError(\"Requested new height (\" + str(new_height) + \") is greater than image height (\" + str(input_img_h) + \").\")\n if new_width > input_img_w:\n raise ValueError(\"Requested new width (\" + str(new_width) + \") is greater than image width (\" + str(input_img_w) + \").\")\n middle_h = round(input_img_h/2)\n half_new_height = round(new_height/2)\n middle_w = round(input_img_w/2)\n half_new_width = round(new_width/2)\n middle_pixels = img[middle_h-half_new_height:middle_h+half_new_height,middle_w-half_new_width:middle_w+half_new_width]\n return middle_pixels\n\ndef set_periodic_pixel(img, frequency, direction, new_pixel):\n h,w = get_dimensions_hw(img)\n img = np.array(img,copy=True)\n if direction == HORIZONTAL:\n for i in range(0,h):\n for j in range(0,w,frequency):\n img[i][j] = new_pixel\n elif direction == VERTICAL:\n for i in range(0,h,frequency):\n for j in range(0,w):\n img[i][j] = new_pixel\n return img \n \n\nif __name__ == \"__main__\":\n img = read_image()\n revised = set_periodic_pixel(img,10,HORIZONTAL,0)\n revised = set_periodic_pixel(revised, 20, VERTICAL, 0)\n save_to_disk(revised)\n display_image(revised)\n\n#Note: Owing to the large input image used for this example, the program will not show all\n#lines unless you zoom in on the saved file (unless your monitor happens to have enough\n#resolution...)\n"
] | [
[
"numpy.array"
]
] |
raymondEhlers/uproot4 | [
"b266614eb3e24d02fa5ed2be4a2d95ab71a5e499"
] | [
"tests/test_0017-multi-basket-multi-branch-fetch.py"
] | [
"# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/master/LICENSE\n\nfrom __future__ import absolute_import\n\nimport sys\nimport json\n\ntry:\n from io import StringIO\nexcept ImportError:\n from StringIO import StringIO\n\nimport numpy\nimport pytest\nimport skhep_testdata\n\nimport uproot4\nimport uproot4.interpretation.numerical\nimport uproot4.interpretation.library\nimport uproot4.source.futures\n\n\ndef test_any_basket():\n interpretation = uproot4.interpretation.numerical.AsDtype(\">i4\")\n\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\")\n )[\"sample/i4\"] as branch:\n assert branch.basket(0).array(interpretation).tolist() == [\n -15,\n -14,\n -13,\n -12,\n -11,\n -10,\n -9,\n ]\n assert branch.basket(1).array(interpretation).tolist() == [\n -8,\n -7,\n -6,\n -5,\n -4,\n -3,\n -2,\n ]\n assert branch.basket(2).array(interpretation).tolist() == [\n -1,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n ]\n assert branch.basket(3).array(interpretation).tolist() == [\n 6,\n 7,\n 8,\n 9,\n 10,\n 11,\n 12,\n ]\n assert branch.basket(4).array(interpretation).tolist() == [\n 13,\n 14,\n ]\n\n\ndef test_stitching_arrays():\n interpretation = uproot4.interpretation.numerical.AsDtype(\"i8\")\n expectation = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]\n basket_arrays = [[0, 1, 2, 3, 4], [5, 6], [], [7, 8, 9], [10], [11, 12, 13, 14]]\n basket_arrays = [numpy.array(x) for x in basket_arrays]\n entry_offsets = numpy.array([0, 5, 7, 7, 10, 11, 15])\n library = uproot4.interpretation.library._libraries[\"np\"]\n\n for start in range(16):\n for stop in range(15, -1, -1):\n actual = interpretation.final_array(\n basket_arrays, start, stop, entry_offsets, library, None\n )\n assert expectation[start:stop] == actual.tolist()\n\n\ndef _names_entries_to_ranges_or_baskets(self, branch_names, entry_start, entry_stop):\n out = []\n for name in branch_names:\n branch = self[name]\n for basket_num, range_or_basket in branch.entries_to_ranges_or_baskets(\n entry_start, entry_stop\n ):\n out.append((branch, basket_num, range_or_basket))\n return out\n\n\ndef test_names_entries_to_ranges_or_baskets():\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\")\n )[\"sample\"] as sample:\n out = _names_entries_to_ranges_or_baskets(sample, [\"i4\"], 0, 30)\n assert [x[1] for x in out] == [0, 1, 2, 3, 4]\n assert [x[2] for x in out] == [\n (6992, 7091),\n (16085, 16184),\n (25939, 26038),\n (35042, 35141),\n (40396, 40475),\n ]\n\n\ndef test_ranges_or_baskets_to_arrays():\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\")\n )[\"sample\"] as sample:\n branch = sample[\"i4\"]\n\n ranges_or_baskets = _names_entries_to_ranges_or_baskets(sample, [\"i4\"], 0, 30)\n branchid_interpretation = {\n branch.cache_key: uproot4.interpretation.numerical.AsDtype(\">i4\")\n }\n entry_start, entry_stop = (0, 30)\n decompression_executor = uproot4.source.futures.TrivialExecutor()\n interpretation_executor = uproot4.source.futures.TrivialExecutor()\n library = uproot4.interpretation.library._libraries[\"np\"]\n\n arrays = {}\n uproot4.behaviors.TBranch._ranges_or_baskets_to_arrays(\n sample,\n ranges_or_baskets,\n branchid_interpretation,\n entry_start,\n entry_stop,\n decompression_executor,\n interpretation_executor,\n library,\n arrays,\n )\n assert arrays[branch.cache_key].tolist() == [\n -15,\n -14,\n -13,\n -12,\n -11,\n -10,\n -9,\n -8,\n -7,\n -6,\n -5,\n -4,\n -3,\n -2,\n -1,\n 0,\n 1,\n 2,\n 3,\n 
4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 11,\n 12,\n 13,\n 14,\n ]\n\n\[email protected](\n \"file_handler\",\n [uproot4.source.file.MultithreadedFileSource, uproot4.source.file.MemmapSource],\n)\ndef test_branch_array_1(file_handler):\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\"),\n file_handler=file_handler,\n )[\"sample/i4\"] as branch:\n assert branch.array(\n uproot4.interpretation.numerical.AsDtype(\">i4\"), library=\"np\"\n ).tolist() == [\n -15,\n -14,\n -13,\n -12,\n -11,\n -10,\n -9,\n -8,\n -7,\n -6,\n -5,\n -4,\n -3,\n -2,\n -1,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 11,\n 12,\n 13,\n 14,\n ]\n\n\[email protected](\n \"file_handler\",\n [uproot4.source.file.MultithreadedFileSource, uproot4.source.file.MemmapSource],\n)\ndef test_branch_array_2(file_handler):\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\"),\n file_handler=file_handler,\n )[\"sample/i4\"] as branch:\n assert branch.array(\n uproot4.interpretation.numerical.AsDtype(\">i4\"),\n entry_start=3,\n entry_stop=-5,\n library=\"np\",\n ).tolist() == [\n -12,\n -11,\n -10,\n -9,\n -8,\n -7,\n -6,\n -5,\n -4,\n -3,\n -2,\n -1,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n ]\n\n\[email protected](\n \"file_handler\",\n [uproot4.source.file.MultithreadedFileSource, uproot4.source.file.MemmapSource],\n)\ndef test_branch_array_3(file_handler):\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\"),\n file_handler=file_handler,\n )[\"sample/i4\"] as branch:\n assert branch.array(\n uproot4.interpretation.numerical.AsDtype(\">i4\"),\n entry_start=3,\n entry_stop=-5,\n interpretation_executor=uproot4.decompression_executor,\n library=\"np\",\n ).tolist() == [\n -12,\n -11,\n -10,\n -9,\n -8,\n -7,\n -6,\n -5,\n -4,\n -3,\n -2,\n -1,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n ]\n\n\[email protected](\n \"file_handler\",\n [uproot4.source.file.MultithreadedFileSource, uproot4.source.file.MemmapSource],\n)\ndef test_branch_array_4(file_handler):\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\"),\n file_handler=file_handler,\n )[\"sample/i4\"] as branch:\n with pytest.raises(ValueError):\n branch.array(uproot4.interpretation.numerical.AsDtype(\">i8\"), library=\"np\")\n\n\ndef test_cache():\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\"),\n object_cache=100,\n array_cache=\"100 MB\",\n ) as f:\n assert f.cache_key == \"db4be408-93ad-11ea-9027-d201a8c0beef:/\"\n assert f[\"sample\"].cache_key == \"db4be408-93ad-11ea-9027-d201a8c0beef:/sample;1\"\n assert (\n f[\"sample/i4\"].cache_key\n == \"db4be408-93ad-11ea-9027-d201a8c0beef:/sample;1:i4(16)\"\n )\n i4 = f[\"sample/i4\"]\n assert list(f.file.array_cache) == []\n i4.array(uproot4.interpretation.numerical.AsDtype(\">i4\"), library=\"np\")\n assert list(f.file.array_cache) == [\n \"db4be408-93ad-11ea-9027-d201a8c0beef:/sample;1:i4(16):AsDtype(Bi4(),Li4()):0-30:np\"\n ]\n\n with pytest.raises(OSError):\n i4.array(\n uproot4.interpretation.numerical.AsDtype(\">i4\"), entry_start=3, library=\"np\"\n )\n\n i4.array(uproot4.interpretation.numerical.AsDtype(\">i4\"), library=\"np\")\n\n\ndef test_pandas():\n pandas = pytest.importorskip(\"pandas\")\n with uproot4.open(\n skhep_testdata.data_path(\"uproot-sample-6.20.04-uncompressed.root\")\n )[\"sample/i4\"] as branch:\n series = branch.array(\n uproot4.interpretation.numerical.AsDtype(\">i4\"),\n 
entry_start=3,\n entry_stop=-5,\n interpretation_executor=uproot4.decompression_executor,\n library=\"pd\",\n )\n assert isinstance(series, pandas.Series)\n assert series.values.tolist() == [\n -12,\n -11,\n -10,\n -9,\n -8,\n -7,\n -6,\n -5,\n -4,\n -3,\n -2,\n -1,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n ]\n"
] | [
[
"numpy.array"
]
] |
sakshamji/FacemaskDetection | [
"b274285ebaef51c110fab3dc608a2c2ef956ec95"
] | [
"facemask.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 22 15:56:47 2020\n\n@author: Saksham\n\"\"\"\n\n\nimport numpy as np\nimport keras\nimport keras.backend as k\nfrom keras.layers import Conv2D,MaxPooling2D,SpatialDropout2D,Flatten,Dropout,Dense\nfrom keras.models import Sequential,load_model\nfrom keras.optimizers import adam\nfrom keras.preprocessing import image\nimport cv2\nimport datetime\n\n\n# UNCOMMENT THE FOLLOWING CODE TO TRAIN THE CNN FROM SCRATCH\n\n# BUILDING MODEL TO CLASSIFY BETWEEN MASK AND NO MASK\n\nmodel=Sequential()\nmodel.add(Conv2D(32,(3,3),activation='relu',input_shape=(150,150,3)))\nmodel.add(MaxPooling2D() )\nmodel.add(Conv2D(32,(3,3),activation='relu'))\nmodel.add(MaxPooling2D() )\nmodel.add(Conv2D(32,(3,3),activation='relu'))\nmodel.add(MaxPooling2D() )\nmodel.add(Flatten())\nmodel.add(Dense(100,activation='relu'))\nmodel.add(Dense(1,activation='sigmoid'))\n\nmodel.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])\n\nfrom keras.preprocessing.image import ImageDataGenerator\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntraining_set = train_datagen.flow_from_directory(\n 'train',\n target_size=(150,150),\n batch_size=16 ,\n class_mode='binary')\n\ntest_set = test_datagen.flow_from_directory(\n 'test',\n target_size=(150,150),\n batch_size=16,\n class_mode='binary')\n\nmodel_saved=model.fit_generator(\n training_set,\n epochs=10,\n validation_data=test_set,\n\n )\n\nmodel.save('mymodel.h5',model_saved)\n\n#To test for individual images\n\nmymodel=load_model('mymodel.h5')\n#test_image=image.load_img('C:/Users/saksham/Desktop/ML Datasets/Face Mask Detection/Dataset/test/without_mask/30.jpg',target_size=(150,150,3))\ntest_image=image.load_img(r'C:/Users/saksham/Desktop/FaceMaskDetector/test/with_mask/1-with-mask.jpg',\n target_size=(150,150,3))\ntest_image\ntest_image=image.img_to_array(test_image)\ntest_image=np.expand_dims(test_image,axis=0)\nmymodel.predict(test_image)[0][0]\n\n\n# IMPLEMENTING LIVE DETECTION OF FACE MASK\n\nmymodel=load_model('mymodel.h5')\n\ncap=cv2.VideoCapture(0)\nface_cascade=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nwhile cap.isOpened():\n _,img=cap.read()\n face=face_cascade.detectMultiScale(img,scaleFactor=1.1,minNeighbors=4)\n for(x,y,w,h) in face:\n face_img = img[y:y+h, x:x+w]\n cv2.imwrite('temp.jpg',face_img)\n test_image=image.load_img('temp.jpg',target_size=(150,150,3))\n test_image=image.img_to_array(test_image)\n test_image=np.expand_dims(test_image,axis=0)\n pred=mymodel.predict(test_image)[0][0]\n if pred==1:\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),3)\n cv2.putText(img,'NO MASK',((x+w)//2,y+h+20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),3)\n else:\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),3)\n cv2.putText(img,'MASK',((x+w)//2,y+h+20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),3)\n datet=str(datetime.datetime.now())\n cv2.putText(img,datet,(400,450),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255),1)\n \n cv2.imshow('img',img)\n \n if cv2.waitKey(1)==ord('q'):\n break\n \ncap.release()\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.expand_dims"
]
] |
AjayJohnAlex/ANN | [
"236bc4ca4aaa07038610bc6870578b1f0255da49"
] | [
"Improving the ANN.py"
] | [
"\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport seaborn as sns\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[2]:\n\n\ndataset = pd.read_csv('Churn_Modelling.csv')\ndataset.head()\n\n\n# In[3]:\n\n\nX = dataset.iloc[:,3:13].values\n\n\n# In[4]:\n\n\ny = dataset.iloc[:,13].values\n\n\n# In[5]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n# In[6]:\n\n\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\n\n\n# In[7]:\n\n\n# we have some object type data and hence need to convert\n# them into int\nlabelencoder1 = LabelEncoder()\n# for geography\nX[:,1] = labelencoder1.fit_transform(X[:,1])\n\n\n# In[8]:\n\n\n# we have some object type data and hence need to convert\n# them into int\nlabelencoder2 = LabelEncoder()\n# for gender\nX[:,2] = labelencoder2.fit_transform(X[:,2])\n\n\n# In[9]:\n\n\n# we need to create dummy values for geography and drop the\n# the unwanted column out of it \n\nonehotencoder = OneHotEncoder(categorical_features=[1])\n\n\n# In[10]:\n\n\nX = onehotencoder.fit_transform(X).toarray()\n\n\n# In[11]:\n\n\n# removing the first dummy class\nX = X [:,1:]\n\n\n# In[12]:\n\n\nX.shape\n\n\n# In[13]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(\nX, y, test_size=0.30, random_state=101)\n\n\n# In[14]:\n\n\n# feature scaling \nfrom sklearn.preprocessing import StandardScaler\n\n\n# In[15]:\n\n\nsc = StandardScaler()\n\n\n# In[16]:\n\n\nX_train = sc.fit_transform(X_train)\n\n\n# In[17]:\n\n\nX_test = sc.transform(X_test)\n\n\n# In[18]:\n\n\n# implementing k 4 cross validation to make better pred\n# keras classifier wrapper and it expects a function to \n# returned as its builds the architecture of ANN\nfrom keras.wrappers.scikit_learn import KerasClassifier\n# k4 models \nfrom sklearn.model_selection import cross_val_score\n# to initialise the ANN\nfrom keras.models import Sequential\n# dense model to build layers of ann\nfrom keras.layers import Dense\n\n\n# In[23]:\n\n\n# the classifer is local in fn\ndef build_classifier():\n \n # you can initialise a ANN in 2 ways \n # either def sequence of layers \n # or def by a graph\n\n # object of sequencial\n classifier = Sequential()\n # adding 2 layers : input layer and first hidden layer\n # units = no of hidden layers\n # kernal_initializer = initilaise weights using function\n # activation = activation function\n # input_dim = no of features in the input \n classifier.add(Dense(units=6,kernel_initializer='uniform',\n activation= 'relu',input_dim=11))\n \n # we will add one more hidden layer even though its not \n # neccesarry \n # we are adding it so that we can learn how to add one more\n # layer \n # and deep learning has many hiiden layers in ANN\n classifier.add(Dense(units=6,kernel_initializer='uniform',\n activation= 'relu'))\n \n # adding output layer \n classifier.add(Dense(units=1,kernel_initializer='uniform',\n activation= 'sigmoid'))\n \n # compile the ANN by applying stochastic GD\n # optimizer = the algo we need to use to find the optimal \n # weights ...there are many we wld use ADAM\n # loss = SGD is based on lost function we needs to be optimised\n # \n classifier.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n \n return classifier\n\n\n# In[24]:\n\n\n# new classifier\nclassifier = KerasClassifier(\n build_fn=build_classifier,batch_size =10,epochs = 100)\n\n\n# In[21]:\n\n\n# now we use cross value score from sklearn \n# k4 classification is used to get a 
relevant pred\n# it wld return 10 accuracy\n\n# accuracies = cross_val_score(estimator=classifier,X=X_train,y=y_train,cv=10,n_jobs = 1)\n\n\n# In[22]:\n\n\nmean = accuracies.mean()\nvariance = accuracies.std()\nprint('mean is ',mean)\nprint('variance is ',variance)\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"pandas.read_csv",
"sklearn.preprocessing.LabelEncoder",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder"
]
] |
TomLXXVI/pipe-network-sim | [
"49e42621180ec3125afa238d3ca56ae9f3a7662a"
] | [
"lib/nummath/deriv.py"
] | [
"import numpy as np\n\n\nclass Deriv:\n \"\"\"\n Calculate the derivative with given order of the function f(t) at point t.\n \"\"\"\n def __init__(self, f, dt, o=1):\n \"\"\"\n Initialize the differentiation solver.\n Params:\n - f the name of the function object ('def f(t):...')\n - dt the calculation step between successive points\n - o the order of the derivative to be calculated\n \"\"\"\n self.f = f\n self.dt = dt\n self.o = o\n\n # coefficients of forward finite difference approximations of order O(h^2)\n self.co = np.array([\n [-3.0, 4.0, -1.0, 0.0, 0.0, 0.0],\n [2.0, -5.0, 4.0, -1.0, 0.0, 0.0],\n [-5.0, 18.0, -24.0, 14.0, -3.0, 0.0],\n [3.0, -14.0, 26.0, -24.0, 11.0, -2.0]\n ])\n self.den = np.array([2 * dt, dt ** 2, 2 * dt ** 3, dt ** 4])\n\n def solve(self, t):\n \"\"\"\n Calculate the derivative at point 't'.\n The method uses Richardson extrapolation to improve accuracy.\n \"\"\"\n df = [0.0, 0.0]\n for i, dt_ in enumerate([self.dt, self.dt / 2]):\n t_array = np.arange(t, t + 6 * dt_, dt_)\n f_array = np.array([self.f(t_i) for t_i in t_array])\n c_array = self.co[self.o - 1, :]\n df[i] = (c_array * f_array) / self.den[self.o - 1]\n return (4.0 * df[1] - df[0]) / 3.0\n"
] | [
[
"numpy.array",
"numpy.arange"
]
] |
whitews/FlowK | [
"d4e43a0488606ce5479b5110486dc3db128f6a87"
] | [
"flowkit/tests/flowjo_wsp_tests.py"
] | [
"\"\"\"\nTests for FlowJo 10 workspace files\n\"\"\"\nimport copy\nimport unittest\nimport os\nfrom io import BytesIO\nimport numpy as np\nfrom flowkit import Session, gates, transforms\nfrom .session_tests import test_samples_8c_full_set\n\n\nclass FlowJoWSPTestCase(unittest.TestCase):\n def test_load_wsp_single_poly(self):\n wsp_path = \"examples/data/simple_line_example/simple_poly_and_rect.wsp\"\n fcs_path = \"examples/data/simple_line_example/data_set_simple_line_100.fcs\"\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n self.assertIsInstance(\n fks.get_gate(\n 'my_group',\n 'poly1',\n sample_id='data_set_simple_line_100.fcs'\n ),\n gates.PolygonGate\n )\n\n gate_names = {'rect1', 'poly1'}\n wsp_gates_tuple = fks.get_gate_ids('my_group')\n wsp_gate_names = set([g[0] for g in wsp_gates_tuple])\n self.assertSetEqual(wsp_gate_names, gate_names)\n\n def test_load_wsp_single_ellipse(self):\n wsp_path = \"examples/data/simple_line_example/single_ellipse_51_events.wsp\"\n fcs_path = \"examples/data/simple_line_example/data_set_simple_line_100.fcs\"\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n self.assertIsInstance(\n fks.get_gate(\n 'All Samples',\n 'ellipse1',\n sample_id='data_set_simple_line_100.fcs'\n ),\n gates.EllipsoidGate\n )\n\n fks.analyze_samples(group_name='All Samples')\n results = fks.get_gating_results('All Samples', 'data_set_simple_line_100.fcs')\n gate_count = results.get_gate_count('ellipse1')\n self.assertEqual(gate_count, 48)\n\n def test_load_wsp_single_quad(self):\n wsp_path = \"examples/data/simple_diamond_example/simple_diamond_example_quad_gate.wsp\"\n fcs_path = \"examples/data/simple_diamond_example/test_data_diamond_01.fcs\"\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n # FlowJo quadrant gates are not true quadrant gates, rather a collection of rectangle gates\n self.assertIsInstance(\n fks.get_gate(\n 'All Samples',\n 'Q1: channel_A- , channel_B+',\n sample_id='test_data_diamond_01.fcs'\n ),\n gates.RectangleGate\n )\n\n fks.analyze_samples(group_name='All Samples')\n results = fks.get_gating_results('All Samples', 'test_data_diamond_01.fcs')\n\n gate_count_q1 = results.get_gate_count('Q1: channel_A- , channel_B+')\n gate_count_q2 = results.get_gate_count('Q2: channel_A+ , channel_B+')\n gate_count_q3 = results.get_gate_count('Q3: channel_A+ , channel_B-')\n gate_count_q4 = results.get_gate_count('Q4: channel_A- , channel_B-')\n self.assertEqual(gate_count_q1, 49671)\n self.assertEqual(gate_count_q2, 50596)\n self.assertEqual(gate_count_q3, 50330)\n self.assertEqual(gate_count_q4, 49403)\n\n def test_wsp_biex_transform(self):\n wsp_path = \"examples/data/simple_diamond_example/test_data_diamond_biex_rect.wsp\"\n fcs_path = \"examples/data/simple_diamond_example/test_data_diamond_01.fcs\"\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n self.assertIsInstance(\n fks.get_gate(\n 'All Samples',\n 'upper_right',\n sample_id='test_data_diamond_01.fcs'\n ),\n gates.RectangleGate\n )\n\n fks.analyze_samples(group_name='All Samples')\n results = fks.get_gating_results('All Samples', 'test_data_diamond_01.fcs')\n gate_count = results.get_gate_count('upper_right')\n self.assertEqual(gate_count, 50605)\n\n def test_wsp_fasinh_transform(self):\n wsp_path = \"examples/data/simple_diamond_example/test_data_diamond_asinh_rect.wsp\"\n fcs_path = \"examples/data/simple_diamond_example/test_data_diamond_01.fcs\"\n\n fks = 
Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n self.assertIsInstance(\n fks.get_gate(\n 'All Samples',\n 'upper_right',\n sample_id='test_data_diamond_01.fcs'\n ),\n gates.RectangleGate\n )\n\n fks.analyze_samples(group_name='All Samples')\n results = fks.get_gating_results('All Samples', 'test_data_diamond_01.fcs')\n gate_count = results.get_gate_count('upper_right')\n self.assertEqual(gate_count, 50559)\n\n def test_wsp_fasinh_transform_v2(self):\n wsp_path = \"examples/data/simple_diamond_example/test_data_diamond_asinh_rect2.wsp\"\n fcs_path = \"examples/data/simple_diamond_example/test_data_diamond_01.fcs\"\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n self.assertIsInstance(\n fks.get_gate(\n 'All Samples',\n 'upper_right',\n sample_id='test_data_diamond_01.fcs'\n ),\n gates.RectangleGate\n )\n\n fks.analyze_samples(group_name='All Samples')\n results = fks.get_gating_results('All Samples', 'test_data_diamond_01.fcs')\n gate_count = results.get_gate_count('upper_right')\n self.assertEqual(gate_count, 50699)\n\n def test_wsp_biex_transform_width_interpolation(self):\n neg = 1.0\n width = -7.943282\n\n # this LUT exists for only the single negative value of 1.0\n lut_file_name = \"tr_biex_l256_w%.6f_n%.6f_m4.418540_r262144.000029.csv\" % (width, neg)\n lut_file_path = os.path.join('examples', 'data', 'flowjo_xforms', lut_file_name)\n y, x = np.loadtxt(lut_file_path, delimiter=',', usecols=(0, 1), skiprows=1, unpack=True)\n\n biex_xform = transforms.WSPBiexTransform('biex', negative=neg, width=width)\n\n test_y = biex_xform.apply(x)\n\n mean_pct_diff = 100. * np.mean(np.abs(test_y[1:] - y[1:]) / y[1:])\n self.assertLess(mean_pct_diff, 0.01)\n\n def test_get_sample_groups(self):\n wsp_path = \"examples/data/simple_line_example/simple_poly_and_rect.wsp\"\n fcs_path = \"examples/data/simple_line_example/data_set_simple_line_100.fcs\"\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path)\n\n groups = fks.get_sample_groups()\n groups_truth = ['default', 'All Samples', 'my_group']\n\n self.assertListEqual(groups, groups_truth)\n\n fks.add_sample_group('group2')\n groups_truth.append('group2')\n groups = fks.get_sample_groups()\n\n self.assertListEqual(groups, groups_truth)\n\n def test_parse_wsp_with_ellipse(self):\n wsp_path = \"examples/data/8_color_data_set/8_color_ICS_with_ellipse.wsp\"\n fcs_path = \"examples/data/8_color_data_set/fcs_files/101_DEN084Y5_15_E01_008_clean.fcs\"\n sample_id = '101_DEN084Y5_15_E01_008_clean.fcs'\n sample_grp = 'DEN'\n gate_name = 'ellipse1'\n gate_path = ('root', 'Time', 'Singlets', 'aAmine-', 'CD3+')\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path, ignore_missing_files=True)\n\n fks.analyze_samples(sample_grp, sample_id=sample_id)\n gate_indices = fks.get_gate_membership(sample_grp, sample_id, gate_name, gate_path=gate_path)\n\n self.assertIsInstance(gate_indices, np.ndarray)\n self.assertEqual(np.sum(gate_indices), 7018)\n\n def test_get_ambiguous_gate_objects(self):\n wsp_path = \"examples/data/8_color_data_set/8_color_ICS.wsp\"\n fcs_path = \"examples/data/8_color_data_set/fcs_files/101_DEN084Y5_15_E01_008_clean.fcs\"\n sample_id = '101_DEN084Y5_15_E01_008_clean.fcs'\n sample_grp = 'DEN'\n gate_name = 'TNFa+'\n gate_path = ('root', 'Time', 'Singlets', 'aAmine-', 'CD3+', 'CD4+')\n\n fks = Session(fcs_samples=fcs_path)\n fks.import_flowjo_workspace(wsp_path, ignore_missing_files=True)\n\n fks.analyze_samples(sample_grp)\n gate_indices = 
fks.get_gate_membership(sample_grp, sample_id, gate_name, gate_path=gate_path)\n\n self.assertIsInstance(gate_indices, np.ndarray)\n self.assertEqual(np.sum(gate_indices), 21)\n\n def test_parse_wsp_reused_gate_with_child(self):\n wsp_path = \"examples/data/8_color_data_set/reused_quad_gate_with_child.wsp\"\n\n fks = Session(copy.deepcopy(test_samples_8c_full_set))\n fks.import_flowjo_workspace(wsp_path, ignore_missing_files=True)\n group_name = 'All Samples'\n gate_name = 'some_child_gate'\n\n gate_ids = fks.get_gate_ids(group_name)\n\n gate_id_1 = (gate_name, ('root', 'good cells', 'cd4+', 'Q2: CD107a+, IL2+'))\n gate_id_2 = (gate_name, ('root', 'good cells', 'cd8+', 'Q2: CD107a+, IL2+'))\n\n self.assertIn(gate_id_1, gate_ids)\n self.assertIn(gate_id_2, gate_ids)\n\n def test_analyze_single_sample(self):\n wsp_path = \"examples/data/8_color_data_set/8_color_ICS_simple.wsp\"\n sample_id = '101_DEN084Y5_15_E01_008_clean.fcs'\n sample_grp = 'DEN'\n\n fks = Session(copy.deepcopy(test_samples_8c_full_set))\n fks.import_flowjo_workspace(wsp_path, ignore_missing_files=True)\n\n sample_ids = fks.get_group_sample_ids(sample_grp)\n self.assertEqual(len(sample_ids), 3)\n\n fks.analyze_samples(sample_grp, sample_id=sample_id)\n report = fks.get_group_report(sample_grp)\n\n self.assertEqual(report['sample'].nunique(), 1)\n\n def test_export_wsp(self):\n wsp_path = \"examples/data/8_color_data_set/8_color_ICS.wsp\"\n sample_grp = 'DEN'\n\n # use a leaf gate to test if the new WSP session is created correctly\n gate_name = 'TNFa+'\n gate_path = ('root', 'Time', 'Singlets', 'aAmine-', 'CD3+', 'CD4+')\n\n fks = Session(copy.deepcopy(test_samples_8c_full_set))\n fks.import_flowjo_workspace(wsp_path, ignore_missing_files=True)\n\n out_file = BytesIO()\n fks.export_wsp(out_file, sample_grp)\n out_file.seek(0)\n\n fks_out = Session(copy.deepcopy(test_samples_8c_full_set))\n fks_out.import_flowjo_workspace(out_file, ignore_missing_files=True)\n\n self.assertIsInstance(fks_out, Session)\n\n fks_gate = fks.get_gate(sample_grp, gate_name, gate_path)\n fks_out_gate = fks_out.get_gate(sample_grp, gate_name, gate_path)\n\n self.assertIsInstance(fks_gate, gates.RectangleGate)\n self.assertIsInstance(fks_out_gate, gates.RectangleGate)\n\n self.assertEqual(fks_gate.gate_name, gate_name)\n self.assertEqual(fks_out_gate.gate_name, gate_name)\n"
] | [
[
"numpy.sum",
"numpy.abs",
"numpy.loadtxt"
]
] |
hushukai/tf-tensor2tensor | [
"6e685f57ed170bb7f887271d7bbd58cf57eb6af7"
] | [
"tensor2tensor/utils/multistep_optimizer.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Multi-step optimizers simulating large batches.\n\nOptimizer variants which make it possible to use very large batch sizes with\nlimited GPU memory. Optimizers in this module accumulate the gradients for n\nbatches, and call the optimizer's update rule every n batches with the\naccumulated gradients.\n\nSee [Saunders et al., 2018](https://arxiv.org/abs/1805.00456) for details.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\n\nclass MultistepAdamOptimizer(tf.compat.v1.train.AdamOptimizer):\n \"\"\"Adam with SGD updates every n steps with accumulated gradients.\"\"\"\n\n def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,\n use_locking=False, name=\"Adam\", n=1):\n super(MultistepAdamOptimizer, self).__init__(\n learning_rate=learning_rate, beta1=beta1, beta2=beta2, epsilon=epsilon,\n use_locking=use_locking, name=name)\n self._n = n # Call Adam optimizer every n batches with accumulated grads\n self._n_t = None # n as tensor\n\n def _create_slots(self, var_list):\n \"\"\"Create slot variables for Adam with accumulated gradients.\"\"\"\n super(MultistepAdamOptimizer, self)._create_slots(var_list)\n first_var = min(var_list, key=lambda x: x.name)\n self._create_non_slot_variable(initial_value=0 if self._n == 1 else 1,\n name=\"iter\",\n colocate_with=first_var)\n for v in var_list:\n self._zeros_slot(v, \"grad_acc\", self._name)\n\n def _get_iter_variable(self):\n graph = (\n None if tf.executing_eagerly() else tf.get_default_graph())\n return self._get_non_slot_variable(\"iter\", graph=graph)\n\n def _prepare(self):\n super(MultistepAdamOptimizer, self)._prepare()\n self._n_t = tf.convert_to_tensor(self._n, name=\"n\")\n\n def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):\n \"\"\"Apply conditionally if counter is zero.\"\"\"\n grad_acc = self.get_slot(var, \"grad_acc\")\n\n def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):\n total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)\n adam_op = apply_fn(total_grad, var, *args, **kwargs)\n with tf.control_dependencies([adam_op]):\n grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc),\n use_locking=self._use_locking)\n return tf.group(adam_op, grad_acc_to_zero_op)\n\n def accumulate_gradient(grad_acc, grad):\n assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)\n return tf.group(assign_op) # Strip return value\n\n return tf.cond(\n tf.equal(self._get_iter_variable(), 0),\n lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),\n lambda: accumulate_gradient(grad_acc, grad))\n\n def _apply_dense(self, grad, var):\n return self._apply_cond(\n super(MultistepAdamOptimizer, self)._apply_dense, grad, var)\n\n def _resource_apply_dense(self, grad, var):\n return self._apply_cond(\n super(MultistepAdamOptimizer, 
self)._resource_apply_dense, grad, var)\n\n def _apply_sparse_shared(self, grad, var, indices, scatter_add):\n return self._apply_cond(\n super(MultistepAdamOptimizer, self)._apply_sparse_shared, grad, var,\n indices, scatter_add)\n\n def _apply_sparse(self, grad, var):\n # TODO(fstahlberg): Implement a sparse version\n tf.logging.warning(\"MultistepAdamOptimizer does not support sparse updates\")\n dense_grad = tf.convert_to_tensor(grad)\n return self._apply_cond(\n super(MultistepAdamOptimizer, self)._apply_dense, dense_grad, var)\n\n def _resource_apply_sparse_duplicate_indices(self, grad, var, indices):\n tf.logging.warning(\"MultistepAdamOptimizer does not support sparse updates\")\n # Note that conversion to a dense Tensor handles duplicate `indices`\n # correctly (summing them). A real sparse implementation will probably want\n # to override _resource_apply_sparse instead so it gets them de-duplicated\n # automatically.\n dense_grad = tf.convert_to_tensor(\n tf.IndexedSlices(values=grad, indices=indices,\n dense_shape=tf.shape(var)))\n return self._apply_cond(\n super(MultistepAdamOptimizer, self)._resource_apply_dense,\n dense_grad, var)\n\n def _finish(self, update_ops, name_scope):\n \"\"\"Updates beta_power variables every n batches and incrs counter.\"\"\"\n iter_ = self._get_iter_variable()\n beta1_power, beta2_power = self._get_beta_accumulators()\n with tf.control_dependencies(update_ops):\n with tf.colocate_with(iter_):\n\n def update_beta_op():\n update_beta1 = beta1_power.assign(\n beta1_power * self._beta1_t,\n use_locking=self._use_locking)\n update_beta2 = beta2_power.assign(\n beta2_power * self._beta2_t,\n use_locking=self._use_locking)\n return tf.group(update_beta1, update_beta2)\n maybe_update_beta = tf.cond(\n tf.equal(iter_, 0), update_beta_op, tf.no_op)\n with tf.control_dependencies([maybe_update_beta]):\n update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),\n use_locking=self._use_locking)\n return tf.group(\n *update_ops + [update_iter, maybe_update_beta], name=name_scope)\n"
] | [
[
"tensorflow.compat.v1.group",
"tensorflow.compat.v1.zeros_like",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.executing_eagerly",
"tensorflow.compat.v1.logging.warning",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.mod",
"tensorflow.compat.v1.assign_add",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.convert_to_tensor",
"tensorflow.compat.v1.colocate_with"
]
] |
liulisixin/unsupervised-learning-intrinsic-images | [
"0d4ad151d203885c87122bcc305c787210b28a5c"
] | [
"data/params.py"
] | [
"import copy\nimport json\nimport random\nimport hashlib\nimport numpy as np\n\n\nclass IntrinsicParameters():\n \"\"\" Global parameter values for the algorithm \"\"\"\n\n def __init__(self):\n\n #: if True, print progress to the console\n self.logging = False\n\n #: if True, use a fixed seed for k-means clustering\n self.fixed_seed = False\n\n #: number of iterations for the global loop\n self.n_iters = 25\n\n #: number of iterations for the dense CRF\n self.n_crf_iters = 10\n\n #: if ``True``, split clusters at the end\n self.split_clusters = True\n\n #: Pixels k units apart vertically or horizontally are smoothed.\n #: The paper only uses k=1.\n self.shading_smooth_k = 1\n\n #: method used to initialize the shading smoothness term:\n #: \"none\": omit this term for the first iteration\n #: \"image\": use the image itself (intensity channel)\n #: \"constant\": constant 0.5\n self.shading_blur_init_method = 'none'\n\n #: standard deviation for blurring the shading channel\n self.shading_blur_sigma = 0.1\n\n #: exponent by which the blur size decreases each iteration\n self.shading_blur_iteration_pow = 1\n\n #: if ``True``, blur in log space. if ``False``, blur in linear\n #: space and then convert to log.\n self.shading_blur_log = True\n\n #: kmeans initialization: weight given to the intensity channel\n self.kmeans_intensity_scale = 0.5\n\n #: kmeans initialization: number of clusters (labels) to use\n self.kmeans_n_clusters = 20\n\n #: kmeans initialization: max pixels to consider at once\n #: (if the image has more than this, the image is randomly subsampled)\n self.kmeans_max_samples = 2000000\n\n #: weight of the absolute reflectance prior\n self.abs_reflectance_weight = 0\n\n #: weight of the absolute shading prior\n self.abs_shading_weight = 500.0\n\n #: gray-point of absolute shading term\n self.abs_shading_gray_point = 0.5\n\n #: if ``True``, compute shading error in log space\n self.abs_shading_log = True\n\n #: weight of the shading smoothness unary term\n self.shading_target_weight = 20000.0\n\n #: norm used to penalize shading smoothness deviations\n self.shading_target_norm = \"L2\"\n\n #: interpret labels as RGB (intensity with chromaticity), thereby\n #: penalizing deviations from grayscale in the shading channel (though\n #: the final answer is always grayscale anyway)\n self.shading_target_chromaticity = False\n\n #: weight of the chromaticity term: each reflectance intensity is\n #: assigned a chromaticity (from the kmeans initialization) and is\n #: encouraged to be assigned to image pixels that share the same\n #: chromaticity.\n self.chromaticity_weight = 0\n\n #: which norm is used for chromaticity\n self.chromaticity_norm = \"L1\"\n\n #: compute reflectance distance in log space for the pairwise terms\n self.pairwise_intensity_log = True\n\n #: include chromaticity in pairwise term\n self.pairwise_intensity_chromaticity = True\n\n #: weight of the pairwise term\n self.pairwise_weight = 10000.0\n\n #: bilateral standard deviation: pairwise pixel distance\n self.theta_p = 0.1\n\n #: bilateral standard deviation: intensity\n self.theta_l = 0.12\n\n #: bilateral standard deviation: chromaticity\n self.theta_c = 0.025\n\n # bilateral standard deviation: Luminance \n self.theta_L = 0.025 \n\n #: if True, keep the median of all intensities fixed in stage 2. 
This\n #: doesn't really change much, since the solver is damped anyway.\n self.stage2_maintain_median_intensity = True\n\n #: which norm to use when minimizing shading differences in stage 2\n self.stage2_norm = \"L1\"\n\n #: if True, interpret labels as RGB instead of intensity\n self.stage2_chromaticity = False\n\n #: parameters to be saved/loaded\n ALL_PARAMS = [\n 'n_iters',\n 'n_crf_iters',\n 'split_clusters',\n 'kmeans_n_clusters',\n 'kmeans_max_samples',\n 'shading_blur_init_method',\n 'shading_blur_method',\n 'shading_blur_log',\n 'shading_blur_sigma',\n 'shading_blur_bilateral_sigma_range',\n 'shading_blur_iteration_pow',\n 'shading_smooth_k',\n 'kmeans_intensity_scale',\n 'abs_reflectance_weight',\n 'abs_shading_log',\n 'abs_shading_weight',\n 'abs_shading_gray_point',\n 'shading_target_weight',\n 'shading_target_norm',\n 'shading_target_chromaticity',\n 'chromaticity_weight',\n 'chromaticity_norm',\n 'pairwise_intensity_log',\n 'pairwise_intensity_chromaticity',\n 'pairwise_weight',\n 'theta_p',\n 'theta_l',\n 'theta_c',\n 'stage2_norm',\n 'stage2_chromaticity',\n 'stage2_maintain_median_intensity',\n ]\n\n #: parameters to be adjusted during training\n TRAIN_PARAMS = [\n 'n_iters',\n #'n_crf_iters',\n\n 'split_clusters',\n\n 'kmeans_intensity_scale',\n 'kmeans_n_clusters',\n\n 'shading_blur_init_method',\n #'shading_blur_log',\n #'pairwise_intensity_log',\n\n 'shading_blur_sigma',\n 'shading_smooth_k',\n\n 'abs_reflectance_weight',\n #'abs_shading_log',\n 'abs_shading_weight',\n 'abs_shading_gray_point',\n 'shading_target_weight',\n 'chromaticity_weight',\n 'pairwise_weight',\n\n 'theta_p',\n 'theta_l',\n 'theta_c',\n ]\n\n #: these parameters are discrete 1-of-N choices\n PARAM_CHOICES = {\n 'shading_blur_init_method': (\n \"none\",\n \"image\",\n \"constant\",\n ),\n }\n\n #: bounds on paramters\n PARAM_BOUNDS = {\n 'n_iters': (1, 30),\n 'n_crf_iters': (1, 10),\n 'shading_blur_sigma': (1e-8, 1.0),\n 'shading_smooth_k': (1, 4),\n 'kmeans_intensity_scale': (1e-8, 1e10),\n 'kmeans_n_clusters': (2, 50),\n 'abs_reflectance_weight': (0, 1e10),\n 'abs_shading_weight': (0, 1e10),\n 'abs_shading_gray_point': (0, 1e10),\n 'shading_target_weight': (0, 1e10),\n 'chromaticity_weight': (0, 1e10),\n 'pairwise_weight': (0, 1e16),\n 'theta_p': (1e-8, 1e10),\n 'theta_l': (1e-8, 1e10),\n 'theta_c': (1e-8, 1e10),\n }\n\n WEIGHT_PARAMS = [\n 'abs_reflectance_weight',\n 'abs_shading_weight',\n 'shading_target_weight',\n 'chromaticity_weight',\n 'pairwise_weight',\n ]\n\n THETA_PARAMS = [\n 'theta_p',\n 'theta_l',\n 'theta_c',\n ]\n\n def to_json(self, indent=4, **extra_kwargs):\n \"\"\" Convert paramters to a JSON-encoded string \"\"\"\n obj = {k: getattr(self, k)\n for k in IntrinsicParameters.ALL_PARAMS}\n if extra_kwargs:\n obj.update(extra_kwargs)\n return json.dumps(obj, sort_keys=True, indent=indent)\n\n def __str__(self):\n return self.to_json()\n\n def __unicode__(self):\n return self.to_json()\n\n @staticmethod\n def from_file(filename):\n \"\"\" Load paramers from ``filename`` (in JSON format) \"\"\"\n return IntrinsicParameters.from_dict(json.load(open(filename)))\n\n @staticmethod\n def from_dict(d):\n \"\"\" Load paramers from a dictionary \"\"\"\n ret = IntrinsicParameters()\n for k, v in d.iteritems():\n if not k.startswith('_') and k not in IntrinsicParameters.ALL_PARAMS:\n raise ValueError(\"Invalid parameter: %s\" % k)\n setattr(ret, k, d[k])\n return ret\n\n def md5(self):\n dump = self.to_json()\n m = hashlib.md5()\n m.update(dump)\n return m.hexdigest()\n\n def save(self, 
filename, **extra_kwargs):\n \"\"\" Save paramers to ``filename`` (in JSON format) \"\"\"\n with open(filename, 'w') as f:\n f.write(self.to_json(**extra_kwargs))\n\n def clip(self):\n \"\"\" Clip parameters to be within bounds \"\"\"\n for k, bounds in IntrinsicParameters.PARAM_BOUNDS.iteritems():\n v = getattr(self, k)\n t = type(v)\n setattr(self, k, t(np.clip(v, bounds[0], bounds[1])))\n\n def random_perterbation(\n self, mean_num_params=8, std_delta=0.5, seed=None):\n \"\"\" Return a new set of parameters with a random perterbation. The\n number of variables modified is Poisson-distributed with mean\n ``mean_num_params`` , and each changed variable is multiplied by exp(x)\n where x is normally distributed with mean 0 and standard deviation\n ``std_delta`` \"\"\"\n\n if seed is not None:\n random.seed(seed)\n np.random.seed(seed)\n\n # choose a random subset to modify\n num_params = len(IntrinsicParameters.TRAIN_PARAMS)\n n = np.clip(np.random.poisson(mean_num_params), 1, num_params)\n keys = random.sample(IntrinsicParameters.TRAIN_PARAMS, n)\n\n # modify the subset\n ret = copy.deepcopy(self)\n for k in keys:\n v = getattr(ret, k)\n t = type(v)\n\n if k in IntrinsicParameters.PARAM_CHOICES:\n v = random.choice(IntrinsicParameters.PARAM_CHOICES[k])\n elif t == bool:\n v = random.choice((False, True))\n else:\n v *= np.exp(random.normalvariate(0, std_delta))\n\n if t in (int, long):\n v = round(v)\n setattr(ret, k, t(v))\n\n ret.clip()\n return ret\n"
] | [
[
"numpy.clip",
"numpy.random.seed",
"numpy.random.poisson"
]
] |
wenming2014/tensorflow | [
"a102a6a71844e194f3946f6318768c5367f1f16b"
] | [
"tensorflow/python/training/checkpoint_management.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# pylint: disable=invalid-name\n\"\"\"Save and restore variables.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os.path\nimport re\nimport time\n\nfrom google.protobuf import text_format\n\nfrom tensorflow.core.protobuf import saver_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.training.checkpoint_state_pb2 import CheckpointState\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef _GetCheckpointFilename(save_dir, latest_filename):\n \"\"\"Returns a filename for storing the CheckpointState.\n\n Args:\n save_dir: The directory for saving and restoring checkpoints.\n latest_filename: Name of the file in 'save_dir' that is used\n to store the CheckpointState.\n\n Returns:\n The path of the file that contains the CheckpointState proto.\n \"\"\"\n if latest_filename is None:\n latest_filename = \"checkpoint\"\n return os.path.join(save_dir, latest_filename)\n\n\[email protected](\n date=None,\n instructions=(\"Use tf.train.CheckpointManager to manage checkpoints rather \"\n \"than editing the Checkpoint proto manually.\"))\n@tf_export(v1=[\"train.generate_checkpoint_state_proto\"])\ndef generate_checkpoint_state_proto(save_dir,\n model_checkpoint_path,\n all_model_checkpoint_paths=None,\n all_model_checkpoint_timestamps=None,\n last_preserved_timestamp=None):\n \"\"\"Generates a checkpoint state proto.\n\n Args:\n save_dir: Directory where the model was saved.\n model_checkpoint_path: The checkpoint file.\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\n the last element must be equal to model_checkpoint_path. These paths\n are also saved in the CheckpointState proto.\n all_model_checkpoint_timestamps: A list of floats, indicating the number of\n seconds since the Epoch when each checkpoint was generated.\n last_preserved_timestamp: A float, indicating the number of seconds since\n the Epoch when the last preserved checkpoint was written, e.g. 
due to a\n `keep_checkpoint_every_n_hours` parameter (see\n `tf.contrib.checkpoint.CheckpointManager` for an implementation).\n Returns:\n CheckpointState proto with model_checkpoint_path and\n all_model_checkpoint_paths updated to either absolute paths or\n relative paths to the current save_dir.\n\n Raises:\n ValueError: If `all_model_checkpoint_timestamps` was provided but its length\n does not match `all_model_checkpoint_paths`.\n \"\"\"\n if all_model_checkpoint_paths is None:\n all_model_checkpoint_paths = []\n\n if (not all_model_checkpoint_paths or\n all_model_checkpoint_paths[-1] != model_checkpoint_path):\n logging.info(\"%s is not in all_model_checkpoint_paths. Manually adding it.\",\n model_checkpoint_path)\n all_model_checkpoint_paths.append(model_checkpoint_path)\n\n if (all_model_checkpoint_timestamps\n and (len(all_model_checkpoint_timestamps)\n != len(all_model_checkpoint_paths))):\n raise ValueError(\n (\"Checkpoint timestamps, if provided, must match checkpoint paths (got \"\n \"paths %s and timestamps %s)\")\n % (all_model_checkpoint_paths, all_model_checkpoint_timestamps))\n\n # Relative paths need to be rewritten to be relative to the \"save_dir\"\n # if model_checkpoint_path already contains \"save_dir\".\n if not os.path.isabs(save_dir):\n if not os.path.isabs(model_checkpoint_path):\n model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)\n for i in range(len(all_model_checkpoint_paths)):\n p = all_model_checkpoint_paths[i]\n if not os.path.isabs(p):\n all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)\n\n coord_checkpoint_proto = CheckpointState(\n model_checkpoint_path=model_checkpoint_path,\n all_model_checkpoint_paths=all_model_checkpoint_paths,\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\n last_preserved_timestamp=last_preserved_timestamp)\n\n return coord_checkpoint_proto\n\n\[email protected](\n date=None,\n instructions=(\"Use tf.train.CheckpointManager to manage checkpoints rather \"\n \"than manually editing the Checkpoint proto.\"))\n@tf_export(v1=[\"train.update_checkpoint_state\"])\ndef update_checkpoint_state(save_dir,\n model_checkpoint_path,\n all_model_checkpoint_paths=None,\n latest_filename=None,\n all_model_checkpoint_timestamps=None,\n last_preserved_timestamp=None):\n \"\"\"Updates the content of the 'checkpoint' file.\n\n This updates the checkpoint file containing a CheckpointState\n proto.\n\n Args:\n save_dir: Directory where the model was saved.\n model_checkpoint_path: The checkpoint file.\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\n the last element must be equal to model_checkpoint_path. These paths\n are also saved in the CheckpointState proto.\n latest_filename: Optional name of the checkpoint file. Default to\n 'checkpoint'.\n all_model_checkpoint_timestamps: Optional list of timestamps (floats,\n seconds since the Epoch) indicating when the checkpoints in\n `all_model_checkpoint_paths` were created.\n last_preserved_timestamp: A float, indicating the number of seconds since\n the Epoch when the last preserved checkpoint was written, e.g. 
due to a\n `keep_checkpoint_every_n_hours` parameter (see\n `tf.contrib.checkpoint.CheckpointManager` for an implementation).\n Raises:\n RuntimeError: If any of the model checkpoint paths conflict with the file\n containing CheckpointSate.\n \"\"\"\n update_checkpoint_state_internal(\n save_dir=save_dir,\n model_checkpoint_path=model_checkpoint_path,\n all_model_checkpoint_paths=all_model_checkpoint_paths,\n latest_filename=latest_filename,\n save_relative_paths=False,\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\n last_preserved_timestamp=last_preserved_timestamp)\n\n\ndef update_checkpoint_state_internal(save_dir,\n model_checkpoint_path,\n all_model_checkpoint_paths=None,\n latest_filename=None,\n save_relative_paths=False,\n all_model_checkpoint_timestamps=None,\n last_preserved_timestamp=None):\n \"\"\"Updates the content of the 'checkpoint' file.\n\n This updates the checkpoint file containing a CheckpointState\n proto.\n\n Args:\n save_dir: Directory where the model was saved.\n model_checkpoint_path: The checkpoint file.\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\n the last element must be equal to model_checkpoint_path. These paths\n are also saved in the CheckpointState proto.\n latest_filename: Optional name of the checkpoint file. Default to\n 'checkpoint'.\n save_relative_paths: If `True`, will write relative paths to the checkpoint\n state file.\n all_model_checkpoint_timestamps: Optional list of timestamps (floats,\n seconds since the Epoch) indicating when the checkpoints in\n `all_model_checkpoint_paths` were created.\n last_preserved_timestamp: A float, indicating the number of seconds since\n the Epoch when the last preserved checkpoint was written, e.g. due to a\n `keep_checkpoint_every_n_hours` parameter (see\n `tf.contrib.checkpoint.CheckpointManager` for an implementation).\n\n Raises:\n RuntimeError: If any of the model checkpoint paths conflict with the file\n containing CheckpointSate.\n \"\"\"\n # Writes the \"checkpoint\" file for the coordinator for later restoration.\n coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)\n if save_relative_paths:\n if os.path.isabs(model_checkpoint_path):\n rel_model_checkpoint_path = os.path.relpath(\n model_checkpoint_path, save_dir)\n else:\n rel_model_checkpoint_path = model_checkpoint_path\n rel_all_model_checkpoint_paths = []\n for p in all_model_checkpoint_paths:\n if os.path.isabs(p):\n rel_all_model_checkpoint_paths.append(os.path.relpath(p, save_dir))\n else:\n rel_all_model_checkpoint_paths.append(p)\n ckpt = generate_checkpoint_state_proto(\n save_dir,\n rel_model_checkpoint_path,\n all_model_checkpoint_paths=rel_all_model_checkpoint_paths,\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\n last_preserved_timestamp=last_preserved_timestamp)\n else:\n ckpt = generate_checkpoint_state_proto(\n save_dir,\n model_checkpoint_path,\n all_model_checkpoint_paths=all_model_checkpoint_paths,\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\n last_preserved_timestamp=last_preserved_timestamp)\n\n if coord_checkpoint_filename == ckpt.model_checkpoint_path:\n raise RuntimeError(\"Save path '%s' conflicts with path used for \"\n \"checkpoint state. 
Please use a different save path.\" %\n model_checkpoint_path)\n\n # Preventing potential read/write race condition by *atomically* writing to a\n # file.\n file_io.atomic_write_string_to_file(coord_checkpoint_filename,\n text_format.MessageToString(ckpt))\n\n\n@tf_export(\"train.get_checkpoint_state\")\ndef get_checkpoint_state(checkpoint_dir, latest_filename=None):\n \"\"\"Returns CheckpointState proto from the \"checkpoint\" file.\n\n If the \"checkpoint\" file contains a valid CheckpointState\n proto, returns it.\n\n Args:\n checkpoint_dir: The directory of checkpoints.\n latest_filename: Optional name of the checkpoint file. Default to\n 'checkpoint'.\n\n Returns:\n A CheckpointState if the state was available, None\n otherwise.\n\n Raises:\n ValueError: if the checkpoint read doesn't have model_checkpoint_path set.\n \"\"\"\n ckpt = None\n coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir,\n latest_filename)\n f = None\n try:\n # Check that the file exists before opening it to avoid\n # many lines of errors from colossus in the logs.\n if file_io.file_exists(coord_checkpoint_filename):\n file_content = file_io.read_file_to_string(\n coord_checkpoint_filename)\n ckpt = CheckpointState()\n text_format.Merge(file_content, ckpt)\n if not ckpt.model_checkpoint_path:\n raise ValueError(\"Invalid checkpoint state loaded from \"\n + checkpoint_dir)\n # For relative model_checkpoint_path and all_model_checkpoint_paths,\n # prepend checkpoint_dir.\n if not os.path.isabs(ckpt.model_checkpoint_path):\n ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,\n ckpt.model_checkpoint_path)\n for i in range(len(ckpt.all_model_checkpoint_paths)):\n p = ckpt.all_model_checkpoint_paths[i]\n if not os.path.isabs(p):\n ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)\n except errors.OpError as e:\n # It's ok if the file cannot be read\n logging.warning(\"%s: %s\", type(e).__name__, e)\n logging.warning(\"%s: Checkpoint ignored\", coord_checkpoint_filename)\n return None\n except text_format.ParseError as e:\n logging.warning(\"%s: %s\", type(e).__name__, e)\n logging.warning(\"%s: Checkpoint ignored\", coord_checkpoint_filename)\n return None\n finally:\n if f:\n f.close()\n return ckpt\n\n\ndef _prefix_to_checkpoint_path(prefix, format_version):\n \"\"\"Returns the pathname of a checkpoint file, given the checkpoint prefix.\n\n For V1 checkpoint, simply returns the prefix itself (the data file). 
For V2,\n returns the pathname to the index file.\n\n Args:\n prefix: a string, the prefix of a checkpoint.\n format_version: the checkpoint format version that corresponds to the\n prefix.\n Returns:\n The pathname of a checkpoint file, taking into account the checkpoint\n format version.\n \"\"\"\n if format_version == saver_pb2.SaverDef.V2:\n return prefix + \".index\" # The index file identifies a checkpoint.\n return prefix # Just the data file.\n\n\n@tf_export(\"train.latest_checkpoint\")\ndef latest_checkpoint(checkpoint_dir, latest_filename=None):\n \"\"\"Finds the filename of latest saved checkpoint file.\n\n Args:\n checkpoint_dir: Directory where the variables were saved.\n latest_filename: Optional name for the protocol buffer file that\n contains the list of most recent checkpoint filenames.\n See the corresponding argument to `Saver.save()`.\n\n Returns:\n The full path to the latest checkpoint or `None` if no checkpoint was found.\n \"\"\"\n # Pick the latest checkpoint based on checkpoint state.\n ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)\n if ckpt and ckpt.model_checkpoint_path:\n # Look for either a V2 path or a V1 path, with priority for V2.\n v2_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,\n saver_pb2.SaverDef.V2)\n v1_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,\n saver_pb2.SaverDef.V1)\n if file_io.get_matching_files(v2_path) or file_io.get_matching_files(\n v1_path):\n return ckpt.model_checkpoint_path\n else:\n logging.error(\"Couldn't match files for checkpoint %s\",\n ckpt.model_checkpoint_path)\n return None\n\n\[email protected](\n date=None,\n instructions=\"Use standard file APIs to check for files with this prefix.\")\n@tf_export(v1=[\"train.checkpoint_exists\"])\ndef checkpoint_exists(checkpoint_prefix):\n \"\"\"Checks whether a V1 or V2 checkpoint exists with the specified prefix.\n\n This is the recommended way to check if a checkpoint exists, since it takes\n into account the naming difference between V1 and V2 formats.\n\n Args:\n checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking\n priority. Typically the result of `Saver.save()` or that of\n `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or\n V1/V2.\n Returns:\n A bool, true iff a checkpoint referred to by `checkpoint_prefix` exists.\n \"\"\"\n pathname = _prefix_to_checkpoint_path(checkpoint_prefix,\n saver_pb2.SaverDef.V2)\n if file_io.get_matching_files(pathname):\n return True\n elif file_io.get_matching_files(checkpoint_prefix):\n return True\n else:\n return False\n\n\[email protected](\n date=None,\n instructions=\"Use standard file utilities to get mtimes.\")\n@tf_export(v1=[\"train.get_checkpoint_mtimes\"])\ndef get_checkpoint_mtimes(checkpoint_prefixes):\n \"\"\"Returns the mtimes (modification timestamps) of the checkpoints.\n\n Globs for the checkpoints pointed to by `checkpoint_prefixes`. If the files\n exist, collect their mtime. 
Both V2 and V1 checkpoints are considered, in\n that priority.\n\n This is the recommended way to get the mtimes, since it takes into account\n the naming difference between V1 and V2 formats.\n\n Args:\n checkpoint_prefixes: a list of checkpoint paths, typically the results of\n `Saver.save()` or those of `tf.train.latest_checkpoint()`, regardless of\n sharded/non-sharded or V1/V2.\n Returns:\n A list of mtimes (in microseconds) of the found checkpoints.\n \"\"\"\n mtimes = []\n\n def match_maybe_append(pathname):\n fnames = file_io.get_matching_files(pathname)\n if fnames:\n mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1e9)\n return True\n return False\n\n for checkpoint_prefix in checkpoint_prefixes:\n # Tries V2's metadata file first.\n pathname = _prefix_to_checkpoint_path(checkpoint_prefix,\n saver_pb2.SaverDef.V2)\n if match_maybe_append(pathname):\n continue\n # Otherwise, tries V1, where the prefix is the complete pathname.\n match_maybe_append(checkpoint_prefix)\n\n return mtimes\n\n\[email protected](\n date=None,\n instructions=\"Use standard file APIs to delete files with this prefix.\")\n@tf_export(v1=[\"train.remove_checkpoint\"])\ndef remove_checkpoint(checkpoint_prefix,\n checkpoint_format_version=saver_pb2.SaverDef.V2,\n meta_graph_suffix=\"meta\"):\n \"\"\"Removes a checkpoint given by `checkpoint_prefix`.\n\n Args:\n checkpoint_prefix: The prefix of a V1 or V2 checkpoint. Typically the result\n of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of\n sharded/non-sharded or V1/V2.\n checkpoint_format_version: `SaverDef.CheckpointFormatVersion`, defaults to\n `SaverDef.V2`.\n meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.\n \"\"\"\n _delete_file_if_exists(\n meta_graph_filename(checkpoint_prefix, meta_graph_suffix))\n if checkpoint_format_version == saver_pb2.SaverDef.V2:\n # V2 has a metadata file and some data files.\n _delete_file_if_exists(checkpoint_prefix + \".index\")\n _delete_file_if_exists(checkpoint_prefix + \".data-?????-of-?????\")\n else:\n # V1, Legacy. Exact match on the data file.\n _delete_file_if_exists(checkpoint_prefix)\n\n\ndef _delete_file_if_exists(filespec):\n \"\"\"Deletes files matching `filespec`.\"\"\"\n for pathname in file_io.get_matching_files(filespec):\n file_io.delete_file(pathname)\n\n\ndef meta_graph_filename(checkpoint_filename, meta_graph_suffix=\"meta\"):\n \"\"\"Returns the meta graph filename.\n\n Args:\n checkpoint_filename: Name of the checkpoint file.\n meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.\n\n Returns:\n MetaGraph file name.\n \"\"\"\n # If the checkpoint_filename is sharded, the checkpoint_filename could\n # be of format model.ckpt-step#-?????-of-shard#. 
For example,\n # model.ckpt-123456-?????-of-00005, or model.ckpt-123456-00001-of-00002.\n basename = re.sub(r\"-[\\d\\?]+-of-\\d+$\", \"\", checkpoint_filename)\n suffixed_filename = \".\".join([basename, meta_graph_suffix])\n return suffixed_filename\n\n\n# TODO(allenl): Allow tf.keras.Model instances in the constructor directly?\n@tf_export(\"train.CheckpointManager\")\nclass CheckpointManager(object):\n \"\"\"Deletes old checkpoints.\n\n Example usage:\n ```python\n import tensorflow as tf\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\n manager = tf.contrib.checkpoint.CheckpointManager(\n checkpoint, directory=\"/tmp/model\", max_to_keep=5)\n status = checkpoint.restore(manager.latest_checkpoint)\n while True:\n # train\n manager.save()\n ```\n\n `CheckpointManager` preserves its own state across instantiations (see the\n `__init__` documentation for details). Only one should be active in a\n particular directory at a time.\n \"\"\"\n\n def __init__(self, checkpoint, directory,\n max_to_keep, keep_checkpoint_every_n_hours=None):\n \"\"\"Configure a `CheckpointManager` for use in `directory`.\n\n If a `CheckpointManager` was previously used in `directory`, its\n state will be restored. This includes the list of managed checkpoints and\n the timestamp bookkeeping necessary to support\n `keep_checkpoint_every_n_hours`. The behavior of the new `CheckpointManager`\n will be the same as the previous `CheckpointManager`, including cleaning up\n existing checkpoints if appropriate.\n\n Checkpoints are only considered for deletion just after a new checkpoint has\n been added. At that point, `max_to_keep` checkpoints will remain in an\n \"active set\". Once a checkpoint is preserved by\n `keep_checkpoint_every_n_hours` it will not be deleted by this\n `CheckpointManager` or any future `CheckpointManager` instantiated in\n `directory` (regardless of the new setting of\n `keep_checkpoint_every_n_hours`). The `max_to_keep` checkpoints in the\n active set may be deleted by this `CheckpointManager` or a future\n `CheckpointManager` instantiated in `directory` (subject to its\n `max_to_keep` and `keep_checkpoint_every_n_hours` settings).\n\n Args:\n checkpoint: The `tf.train.Checkpoint` instance to save and manage\n checkpoints for.\n directory: The path to a directory in which to write checkpoints. A\n special file named \"checkpoint\" is also written to this directory (in a\n human-readable text format) which contains the state of the\n `CheckpointManager`.\n max_to_keep: An integer, the number of checkpoints to keep. Unless\n preserved by `keep_checkpoint_every_n_hours`, checkpoints will be\n deleted from the active set, oldest first, until only `max_to_keep`\n checkpoints remain. If `None`, no checkpoints are deleted and everything\n stays in the active set. Note that `max_to_keep=None` will keep all\n checkpoint paths in memory and in the checkpoint state protocol buffer\n on disk.\n keep_checkpoint_every_n_hours: Upon removal from the active set, a\n checkpoint will be preserved if it has been at least\n `keep_checkpoint_every_n_hours` since the last preserved checkpoint. 
The\n        default setting of `None` does not preserve any checkpoints in this way.\n\n    Raises:\n      ValueError: If `max_to_keep` is not a positive integer.\n    \"\"\"\n    self._checkpoint = checkpoint\n    self._save_counter_assign = None\n    if max_to_keep is not None and max_to_keep <= 0:\n      raise ValueError(\n          (\"Expected a positive integer or `None` for `max_to_keep`, \"\n           \"got %d.\")\n          % (max_to_keep,))\n    self._max_to_keep = max_to_keep\n    self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours\n    self._directory = directory\n    self._checkpoint_prefix = os.path.join(directory, \"ckpt\")\n    recovered_state = get_checkpoint_state(directory)\n    current_clock = time.time()\n    self._maybe_delete = collections.OrderedDict()\n    if recovered_state is None:\n      self._latest_checkpoint = None\n      # Set the clock back slightly to avoid race conditions when quickly\n      # re-creating a CheckpointManager.\n      self._last_preserved_timestamp = current_clock - 1.\n    else:\n      self._latest_checkpoint = recovered_state.model_checkpoint_path\n      self._last_preserved_timestamp = recovered_state.last_preserved_timestamp\n      if current_clock < self._last_preserved_timestamp:\n        # Time seems to have reversed itself. In addition to this warning, we'll\n        # min() saved checkpoint timestamps with the current time to ensure that\n        # old checkpoints don't get deleted accidentally.\n        logging.warning(\n            (\"time.time() returned a value %f seconds behind the last \"\n             \"preserved checkpoint timestamp.\")\n            % (self._last_preserved_timestamp - current_clock,))\n        self._last_preserved_timestamp = current_clock\n      all_timestamps = recovered_state.all_model_checkpoint_timestamps\n      all_paths = recovered_state.all_model_checkpoint_paths\n      del recovered_state  # Uses modified values from now on\n      if not all_timestamps:\n        all_timestamps = [self._last_preserved_timestamp] * len(all_paths)\n\n      for filename, timestamp in zip(all_paths, all_timestamps):\n        timestamp = min(timestamp, current_clock)\n        if timestamp > self._last_preserved_timestamp:\n          self._maybe_delete[filename] = timestamp\n\n  @property\n  def latest_checkpoint(self):\n    \"\"\"The prefix of the most recent checkpoint in `directory`.\n\n    Equivalent to `tf.train.latest_checkpoint(directory)` where `directory` is\n    the constructor argument to `CheckpointManager`.\n\n    Suitable for passing to `tf.train.Checkpoint.restore` to resume training.\n\n    Returns:\n      The checkpoint prefix.
If there are no checkpoints, returns `None`.\n \"\"\"\n return self._latest_checkpoint\n\n @property\n def checkpoints(self):\n \"\"\"A list of managed checkpoints.\n\n Note that checkpoints saved due to `keep_checkpoint_every_n_hours` will not\n show up in this list (to avoid ever-growing filename lists).\n\n Returns:\n A list of filenames, sorted from oldest to newest.\n \"\"\"\n return list(self._maybe_delete.keys())\n\n def _sweep(self):\n \"\"\"Deletes or preserves managed checkpoints.\"\"\"\n if not self._max_to_keep:\n # Does not update self._last_preserved_timestamp, since everything is kept\n # in the active set.\n return\n while len(self._maybe_delete) > self._max_to_keep:\n filename, timestamp = self._maybe_delete.popitem(last=False)\n # Even if we're keeping this checkpoint due to\n # keep_checkpoint_every_n_hours, we won't reference it to avoid\n # infinitely-growing CheckpointState protos.\n if (self._keep_checkpoint_every_n_hours\n and (timestamp - self._keep_checkpoint_every_n_hours * 3600.\n >= self._last_preserved_timestamp)):\n self._last_preserved_timestamp = timestamp\n continue\n remove_checkpoint(filename)\n\n def _record_state(self):\n \"\"\"Saves the `CheckpointManager`'s state in `directory`.\"\"\"\n filenames, timestamps = zip(*self._maybe_delete.items())\n update_checkpoint_state_internal(\n self._directory,\n model_checkpoint_path=self.latest_checkpoint,\n all_model_checkpoint_paths=filenames,\n all_model_checkpoint_timestamps=timestamps,\n last_preserved_timestamp=self._last_preserved_timestamp,\n save_relative_paths=True)\n\n @property\n def _prefix(self):\n \"\"\"A common prefix for all checkpoints saved with this manager.\n\n For example, if `directory` (a constructor argument) were `\"/tmp/tf-model\"`,\n `prefix` would be `\"/tmp/tf-model/ckpt\"` and checkpoints would generally be\n numbered `\"/tmp/tf-model/ckpt-1\"`, `\"/tmp/tf-model/ckpt-2\"`, and so on. Each\n checkpoint has several associated files\n (e.g. `\"/tmp/tf-model/ckpt-2.index\"`).\n\n Returns:\n A string prefix.\n \"\"\"\n return self._checkpoint_prefix\n\n def save(self, checkpoint_number=None):\n \"\"\"Creates a new checkpoint and manages it.\n\n Args:\n checkpoint_number: An optional integer, or an integer-dtype `Variable` or\n `Tensor`, used to number the checkpoint. If `None` (default),\n checkpoints are numbered using `checkpoint.save_counter`. Even if\n `checkpoint_number` is provided, `save_counter` is still incremented. A\n user-provided `checkpoint_number` is not incremented even if it is a\n `Variable`.\n\n Returns:\n The path to the new checkpoint. 
It is also recorded in the `checkpoints`\n      and `latest_checkpoint` properties.\n    \"\"\"\n    # Save counter logic duplicated from tf.train.Checkpoint, soon to diverge\n    # slightly with a custom numbering option.\n    if context.executing_eagerly():\n      save_counter = self._checkpoint.save_counter\n      save_counter.assign_add(1)\n      session = None\n    else:\n      session = ops.get_default_session()\n\n      def _initializing_creator(next_creator, **kwargs):\n        \"\"\"Initialize the save counter if it has been newly created.\"\"\"\n        v = next_creator(**kwargs)\n        session.run(v.initializer)\n        return v\n\n      with variable_scope.variable_creator_scope(_initializing_creator):\n        save_counter = self._checkpoint.save_counter\n      if self._save_counter_assign is None:\n        self._save_counter_assign = save_counter.assign_add(1, read_value=False)\n      session.run(self._save_counter_assign)\n    if checkpoint_number is None:\n      checkpoint_number = save_counter\n    if not isinstance(checkpoint_number, compat.integral_types):\n      checkpoint_number = training_util.global_step(\n          sess=session, global_step_tensor=checkpoint_number)\n    prefix = \"%s-%d\" % (self._prefix, checkpoint_number)\n    save_path = self._checkpoint.write(prefix)\n    timestamp = time.time()\n    # If this is an overwritten checkpoint we were previously tracking, delete\n    # and reinsert it to make sure it goes to the end of the queue.\n    if save_path in self._maybe_delete:\n      del self._maybe_delete[save_path]\n    self._maybe_delete[save_path] = timestamp\n    self._latest_checkpoint = save_path\n    self._sweep()\n    self._record_state()\n    return save_path\n"
] | [
[
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.lib.io.file_io.delete_file",
"tensorflow.python.platform.tf_logging.error",
"tensorflow.python.training.training_util.global_step",
"tensorflow.python.framework.ops.get_default_session",
"tensorflow.python.lib.io.file_io.stat",
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.python.lib.io.file_io.get_matching_files",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.lib.io.file_io.file_exists",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.lib.io.file_io.read_file_to_string",
"tensorflow.python.training.checkpoint_state_pb2.CheckpointState"
]
] |
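The checkpoint-management code in the row above is easiest to see end to end with a small driver. A minimal usage sketch, assuming a TF 2.x install where the class is exported as `tf.train.CheckpointManager`; the directory path and variable names are illustrative:

```python
import tensorflow as tf

# Save numbered checkpoints into a managed directory, keeping only the
# newest three, then resume from whatever was written last.
step = tf.Variable(0, dtype=tf.int64)
ckpt = tf.train.Checkpoint(step=step)
manager = tf.train.CheckpointManager(ckpt, directory="/tmp/tf-model", max_to_keep=3)

ckpt.restore(manager.latest_checkpoint)  # no-op on the first run

for _ in range(5):
    step.assign_add(1)
    path = manager.save()  # e.g. /tmp/tf-model/ckpt-1, ckpt-2, ...
    print("saved", path)

print("latest:", tf.train.latest_checkpoint("/tmp/tf-model"))
```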
mantuoluozk/MFC | [
"e296d7a8e345bc2ca404b5f0fb7f5048f9c5f0d3"
] | [
"code/test_util.py"
] | [
"import h5py\nimport math\nimport nibabel as nib\nimport numpy as np\nfrom medpy import metric\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom skimage.measure import label\n\n\ndef getLargestCC(segmentation):\n labels = label(segmentation)\n assert(labels.max() != 0) # assume at least 1 CC\n largestCC = labels == np.argmax(np.bincount(labels.flat)[1:])+1\n return largestCC\n\n\ndef test_all_case(net, image_list, num_classes, patch_size=(112, 112, 80), stride_xy=18, stride_z=4, save_result=True, test_save_path=None, preproc_fn=None, metric_detail=0, nms=0):\n total_metric = 0.0\n loader = tqdm(image_list) if not metric_detail else image_list\n ith = 0\n for image_path in loader:\n # id = image_path.split('/')[-2]\n h5f = h5py.File(image_path, 'r')\n image = h5f['image'][:]\n label = h5f['label'][:]\n if preproc_fn is not None:\n image = preproc_fn(image)\n prediction, score_map = test_single_case(\n net, image, stride_xy, stride_z, patch_size, num_classes=num_classes)\n if nms:\n prediction = getLargestCC(prediction)\n\n if np.sum(prediction) == 0:\n single_metric = (0, 0, 0, 0)\n else:\n single_metric = calculate_metric_percase(prediction, label[:])\n if metric_detail:\n print('%02d,\\t%.5f, %.5f, %.5f, %.5f' % (\n ith, single_metric[0], single_metric[1], single_metric[2], single_metric[3]))\n\n total_metric += np.asarray(single_metric)\n\n if save_result:\n nib.save(nib.Nifti1Image(prediction.astype(np.float32),\n np.eye(4)), test_save_path + \"%02d_pred.nii.gz\" % ith)\n nib.save(nib.Nifti1Image(image[:].astype(np.float32), np.eye(\n 4)), test_save_path + \"%02d_img.nii.gz\" % ith)\n nib.save(nib.Nifti1Image(label[:].astype(np.float32), np.eye(\n 4)), test_save_path + \"%02d_gt.nii.gz\" % ith)\n ith += 1\n\n avg_metric = total_metric / len(image_list)\n print('average metric is {}'.format(avg_metric))\n\n return avg_metric\n\n\ndef test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1):\n w, h, d = image.shape\n\n # if the size of image is less than patch_size, then padding it\n add_pad = False\n if w < patch_size[0]:\n w_pad = patch_size[0]-w\n add_pad = True\n else:\n w_pad = 0\n if h < patch_size[1]:\n h_pad = patch_size[1]-h\n add_pad = True\n else:\n h_pad = 0\n if d < patch_size[2]:\n d_pad = patch_size[2]-d\n add_pad = True\n else:\n d_pad = 0\n wl_pad, wr_pad = w_pad//2, w_pad-w_pad//2\n hl_pad, hr_pad = h_pad//2, h_pad-h_pad//2\n dl_pad, dr_pad = d_pad//2, d_pad-d_pad//2\n if add_pad:\n image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad),\n (dl_pad, dr_pad)], mode='constant', constant_values=0)\n ww, hh, dd = image.shape\n\n sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1\n sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1\n sz = math.ceil((dd - patch_size[2]) / stride_z) + 1\n # print(\"{}, {}, {}\".format(sx, sy, sz))\n score_map = np.zeros((num_classes, ) + image.shape).astype(np.float32)\n cnt = np.zeros(image.shape).astype(np.float32)\n\n for x in range(0, sx):\n xs = min(stride_xy*x, ww-patch_size[0])\n for y in range(0, sy):\n ys = min(stride_xy * y, hh-patch_size[1])\n for z in range(0, sz):\n zs = min(stride_z * z, dd-patch_size[2])\n test_patch = image[xs:xs+patch_size[0],\n ys:ys+patch_size[1], zs:zs+patch_size[2]]\n test_patch = np.expand_dims(np.expand_dims(\n test_patch, axis=0), axis=0).astype(np.float32)\n test_patch = torch.from_numpy(test_patch).cuda()\n\n with torch.no_grad():\n y1_tanh, y1= net(test_patch)\n # ensemble\n y = torch.sigmoid(y1)\n dis_to_mask = torch.sigmoid(-1500*y1_tanh)\n\n y = 
y.cpu().data.numpy()\n dis2mask = dis_to_mask.cpu().data.numpy()\n y = y[0, :, :, :, :]\n dis2mask = dis2mask[0, :, :, :, :]\n score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \\\n = score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + y\n cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \\\n = cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + 1\n score_map = score_map/np.expand_dims(cnt, axis=0)\n label_map = (score_map[0] > 0.5).astype(np.int)\n\n if add_pad:\n label_map = label_map[wl_pad:wl_pad+w,\n hl_pad:hl_pad+h, dl_pad:dl_pad+d]\n score_map = score_map[:, wl_pad:wl_pad +\n w, hl_pad:hl_pad+h, dl_pad:dl_pad+d]\n return label_map, score_map\n\n\ndef cal_dice(prediction, label, num=2):\n total_dice = np.zeros(num-1)\n for i in range(1, num):\n prediction_tmp = (prediction == i)\n label_tmp = (label == i)\n prediction_tmp = prediction_tmp.astype(np.float)\n label_tmp = label_tmp.astype(np.float)\n\n dice = 2 * np.sum(prediction_tmp * label_tmp) / \\\n (np.sum(prediction_tmp) + np.sum(label_tmp))\n total_dice[i - 1] += dice\n\n return total_dice\n\n\ndef calculate_metric_percase(pred, gt):\n dice = metric.binary.dc(pred, gt)\n jc = metric.binary.jc(pred, gt)\n hd = metric.binary.hd95(pred, gt)\n asd = metric.binary.asd(pred, gt)\n\n return dice, jc, hd, asd\n"
] | [
[
"numpy.sum",
"numpy.eye",
"numpy.bincount",
"numpy.zeros",
"torch.no_grad",
"numpy.asarray",
"numpy.expand_dims",
"torch.from_numpy",
"torch.sigmoid",
"numpy.pad"
]
] |
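The `getLargestCC` helper in `test_util.py` above is the post-processing step applied when `nms=1`. A self-contained sketch of what it does, on an assumed toy volume with two disconnected blobs:

```python
import numpy as np
from skimage.measure import label

# Hypothetical toy segmentation: two disconnected foreground blobs.
seg = np.zeros((8, 8, 8), dtype=np.uint8)
seg[1:3, 1:3, 1:3] = 1   # small blob (8 voxels)
seg[4:8, 4:8, 4:8] = 1   # large blob (64 voxels)

labels = label(seg)
# np.bincount counts voxels per label; [1:] skips background (label 0).
largest = labels == np.argmax(np.bincount(labels.flat)[1:]) + 1
print(largest.sum())  # 64 -- only the largest connected component survives
```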
zs7779/Pytorch_Retinaface | [
"eeb92c28f3217da7439118ed89df8a83c75cc161"
] | [
"retina_models/retinaface.py"
] | [
"import torch\nimport torch.nn as nn\nimport torchvision.models.detection.backbone_utils as backbone_utils\nimport torchvision.models._utils as _utils\nimport torch.nn.functional as F\nfrom collections import OrderedDict\n\nfrom retina_models.net import MobileNetV1 as MobileNetV1\nfrom retina_models.net import FPN as FPN\nfrom retina_models.net import SSH as SSH\n\n\n\nclass ClassHead(nn.Module):\n def __init__(self,inchannels=512,num_anchors=3):\n super(ClassHead,self).__init__()\n self.num_anchors = num_anchors\n self.conv1x1 = nn.Conv2d(inchannels,self.num_anchors*2,kernel_size=(1,1),stride=1,padding=0)\n\n def forward(self,x):\n out = self.conv1x1(x)\n out = out.permute(0,2,3,1).contiguous()\n \n return out.view(out.shape[0], -1, 2)\n\nclass BboxHead(nn.Module):\n def __init__(self,inchannels=512,num_anchors=3):\n super(BboxHead,self).__init__()\n self.conv1x1 = nn.Conv2d(inchannels,num_anchors*4,kernel_size=(1,1),stride=1,padding=0)\n\n def forward(self,x):\n out = self.conv1x1(x)\n out = out.permute(0,2,3,1).contiguous()\n\n return out.view(out.shape[0], -1, 4)\n\nclass LandmarkHead(nn.Module):\n def __init__(self,inchannels=512,num_anchors=3):\n super(LandmarkHead,self).__init__()\n self.conv1x1 = nn.Conv2d(inchannels,num_anchors*10,kernel_size=(1,1),stride=1,padding=0)\n\n def forward(self,x):\n out = self.conv1x1(x)\n out = out.permute(0,2,3,1).contiguous()\n\n return out.view(out.shape[0], -1, 10)\n\nclass RetinaFace(nn.Module):\n def __init__(self, cfg = None, phase = 'train'):\n \"\"\"\n :param cfg: Network related settings.\n :param phase: train or test.\n \"\"\"\n super(RetinaFace,self).__init__()\n self.phase = phase\n backbone = None\n if cfg['name'] == 'mobilenet0.25':\n backbone = MobileNetV1()\n if cfg['pretrain']:\n checkpoint = torch.load(\"./Pytorch_Retinaface/weights/mobilenetV1X0.25_pretrain.tar\", map_location=torch.device('cpu'))\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in checkpoint['state_dict'].items():\n name = k[7:] # remove module.\n new_state_dict[name] = v\n # load params\n backbone.load_state_dict(new_state_dict)\n elif cfg['name'] == 'Resnet50':\n import torchvision.models as models\n backbone = models.resnet50(pretrained=cfg['pretrain'])\n\n self.body = _utils.IntermediateLayerGetter(backbone, cfg['return_layers'])\n in_channels_stage2 = cfg['in_channel']\n in_channels_list = [\n in_channels_stage2 * 2,\n in_channels_stage2 * 4,\n in_channels_stage2 * 8,\n ]\n out_channels = cfg['out_channel']\n self.fpn = FPN(in_channels_list,out_channels)\n self.ssh1 = SSH(out_channels, out_channels)\n self.ssh2 = SSH(out_channels, out_channels)\n self.ssh3 = SSH(out_channels, out_channels)\n\n self.ClassHead = self._make_class_head(fpn_num=3, inchannels=cfg['out_channel'])\n self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])\n self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])\n\n def _make_class_head(self,fpn_num=3,inchannels=64,anchor_num=2):\n classhead = nn.ModuleList()\n for i in range(fpn_num):\n classhead.append(ClassHead(inchannels,anchor_num))\n return classhead\n \n def _make_bbox_head(self,fpn_num=3,inchannels=64,anchor_num=2):\n bboxhead = nn.ModuleList()\n for i in range(fpn_num):\n bboxhead.append(BboxHead(inchannels,anchor_num))\n return bboxhead\n\n def _make_landmark_head(self,fpn_num=3,inchannels=64,anchor_num=2):\n landmarkhead = nn.ModuleList()\n for i in range(fpn_num):\n landmarkhead.append(LandmarkHead(inchannels,anchor_num))\n return 
landmarkhead\n\n def forward(self,inputs):\n out = self.body(inputs)\n\n # FPN\n fpn = self.fpn(out)\n\n # SSH\n feature1 = self.ssh1(fpn[0])\n feature2 = self.ssh2(fpn[1])\n feature3 = self.ssh3(fpn[2])\n features = [feature1, feature2, feature3]\n\n bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)\n classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)],dim=1)\n ldm_regressions = torch.cat([self.LandmarkHead[i](feature) for i, feature in enumerate(features)], dim=1)\n\n if self.phase == 'train':\n output = (bbox_regressions, classifications, ldm_regressions)\n else:\n output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)\n return output"
] | [
[
"torch.nn.functional.softmax",
"torch.nn.Conv2d",
"torch.nn.ModuleList",
"torch.device"
]
] |
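The three detection heads above share one pattern: a 1x1 convolution followed by a permute-and-reshape into per-anchor predictions. A standalone sketch of that reshape, with feature-map size and channel count made up for illustration:

```python
import torch
import torch.nn as nn

# (B, num_anchors*2, H, W) -> (B, H*W*num_anchors, 2), as in ClassHead.
num_anchors = 3
head = nn.Conv2d(64, num_anchors * 2, kernel_size=1)

x = torch.randn(2, 64, 20, 20)              # a dummy FPN feature map
out = head(x).permute(0, 2, 3, 1).contiguous()
out = out.view(out.shape[0], -1, 2)
print(out.shape)                            # torch.Size([2, 1200, 2])
```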
ldelebec/asteroid | [
"d6390baca5409634f112ceed554ea66c4054cb54"
] | [
"asteroid/models/demask.py"
] | [
"from torch import nn\nfrom .base_models import BaseEncoderMaskerDecoder\nfrom asteroid_filterbanks import make_enc_dec\nfrom asteroid_filterbanks.transforms import mag, magreim\nfrom ..masknn import norms, activations\nfrom ..utils.torch_utils import pad_x_to_y\nimport warnings\n\n\nclass DeMask(BaseEncoderMaskerDecoder):\n \"\"\"\n Simple MLP model for surgical mask speech enhancement A transformed-domain masking approach is used.\n\n Args:\n input_type (str, optional): whether the magnitude spectrogram \"mag\" or both real imaginary parts \"reim\" are\n passed as features to the masker network.\n Concatenation of \"mag\" and \"reim\" also can be used by using \"cat\".\n output_type (str, optional): whether the masker ouputs a mask\n for magnitude spectrogram \"mag\" or both real imaginary parts \"reim\".\n\n hidden_dims (list, optional): list of MLP hidden layer sizes.\n dropout (float, optional): dropout probability.\n activation (str, optional): type of activation used in hidden MLP layers.\n mask_act (str, optional): Which non-linear function to generate mask.\n norm_type (str, optional): To choose from ``'BN'``, ``'gLN'``,\n ``'cLN'``.\n\n fb_name (str): type of analysis and synthesis filterbanks used,\n choose between [\"stft\", \"free\", \"analytic_free\"].\n n_filters (int): number of filters in the analysis and synthesis filterbanks.\n stride (int): filterbank filters stride.\n kernel_size (int): length of filters in the filterbank.\n encoder_activation (str)\n sample_rate (float): Sampling rate of the model.\n **fb_kwargs (dict): Additional kwards to pass to the filterbank\n creation.\n \"\"\"\n\n def __init__(\n self,\n input_type=\"mag\",\n output_type=\"mag\",\n hidden_dims=(1024,),\n dropout=0.0,\n activation=\"relu\",\n mask_act=\"relu\",\n norm_type=\"gLN\",\n fb_name=\"stft\",\n n_filters=512,\n stride=256,\n kernel_size=512,\n sample_rate=16000,\n **fb_kwargs,\n ):\n encoder, decoder = make_enc_dec(\n fb_name,\n kernel_size=kernel_size,\n n_filters=n_filters,\n stride=stride,\n sample_rate=sample_rate,\n **fb_kwargs,\n )\n\n n_masker_in = self._get_n_feats_input(input_type, encoder.n_feats_out)\n n_masker_out = self._get_n_feats_output(output_type, encoder.n_feats_out)\n masker = build_demask_masker(\n n_masker_in,\n n_masker_out,\n norm_type=norm_type,\n activation=activation,\n hidden_dims=hidden_dims,\n dropout=dropout,\n mask_act=mask_act,\n )\n super().__init__(encoder, masker, decoder)\n\n self.input_type = input_type\n self.output_type = output_type\n self.hidden_dims = hidden_dims\n self.dropout = dropout\n self.activation = activation\n self.mask_act = mask_act\n self.norm_type = norm_type\n\n def _get_n_feats_input(self, input_type, encoder_n_out):\n if input_type == \"reim\":\n return encoder_n_out\n\n if input_type not in {\"mag\", \"cat\"}:\n raise NotImplementedError(\"Input type should be either mag, reim or cat\")\n\n n_feats_input = encoder_n_out // 2\n if input_type == \"cat\":\n n_feats_input += encoder_n_out\n return n_feats_input\n\n def _get_n_feats_output(self, output_type, encoder_n_out):\n if output_type == \"mag\":\n return encoder_n_out // 2\n if output_type == \"reim\":\n return encoder_n_out\n raise NotImplementedError(\"Output type should be either mag or reim\")\n\n def forward_masker(self, tf_rep):\n \"\"\"Estimates masks based on time-frequency representations.\n\n Args:\n tf_rep (torch.Tensor): Time-frequency representation in\n (batch, freq, seq).\n\n Returns:\n torch.Tensor: Estimated masks in (batch, freq, seq).\n \"\"\"\n 
masker_input = tf_rep\n if self.input_type == \"mag\":\n masker_input = mag(masker_input)\n elif self.input_type == \"cat\":\n masker_input = magreim(masker_input)\n est_masks = self.masker(masker_input)\n if self.output_type == \"mag\":\n est_masks = est_masks.repeat(1, 2, 1)\n return est_masks\n\n def apply_masks(self, tf_rep, est_masks):\n \"\"\"Applies masks to time-frequency representations.\n\n Args:\n tf_rep (torch.Tensor): Time-frequency representations in\n (batch, freq, seq).\n est_masks (torch.Tensor): Estimated masks in (batch, freq, seq).\n\n Returns:\n torch.Tensor: Masked time-frequency representations.\n \"\"\"\n if self.output_type == \"reim\":\n tf_rep = tf_rep.unsqueeze(1)\n return est_masks * tf_rep\n\n def get_model_args(self):\n \"\"\" Arguments needed to re-instantiate the model. \"\"\"\n model_args = {\n \"input_type\": self.input_type,\n \"output_type\": self.output_type,\n \"hidden_dims\": self.hidden_dims,\n \"dropout\": self.dropout,\n \"activation\": self.activation,\n \"mask_act\": self.mask_act,\n \"norm_type\": self.norm_type,\n }\n model_args.update(self.encoder.filterbank.get_config())\n return model_args\n\n\ndef build_demask_masker(\n n_in,\n n_out,\n activation=\"relu\",\n dropout=0.0,\n hidden_dims=(1024,),\n mask_act=\"relu\",\n norm_type=\"gLN\",\n):\n make_layer_norm = norms.get(norm_type)\n net = [make_layer_norm(n_in)]\n layer_activation = activations.get(activation)()\n in_chan = n_in\n for hidden_dim in hidden_dims:\n net.extend(\n [\n nn.Conv1d(in_chan, hidden_dim, 1),\n make_layer_norm(hidden_dim),\n layer_activation,\n nn.Dropout(dropout),\n ]\n )\n in_chan = hidden_dim\n\n net.extend([nn.Conv1d(in_chan, n_out, 1), activations.get(mask_act)()])\n return nn.Sequential(*net)\n"
] | [
[
"torch.nn.Conv1d",
"torch.nn.Dropout",
"torch.nn.Sequential"
]
] |
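`build_demask_masker` above stacks 1x1 `Conv1d` layers, which act as a per-frame MLP over the frequency axis. A stripped-down sketch of the same idea, omitting the normalization layers the real builder inserts; the shapes are assumptions:

```python
import torch
import torch.nn as nn

# A minimal stand-in for the DeMask masker: 1x1 convs as per-frame linears.
n_in, hidden, n_out = 257, 1024, 257
masker = nn.Sequential(
    nn.Conv1d(n_in, hidden, 1),   # per-time-step linear layer
    nn.ReLU(),
    nn.Dropout(0.0),
    nn.Conv1d(hidden, n_out, 1),
    nn.ReLU(),                    # mask_act="relu" keeps the mask non-negative
)

spec = torch.rand(4, n_in, 100)   # (batch, freq, frames) magnitude features
mask = masker(spec)
enhanced = mask * spec            # multiplicative masking in the TF domain
print(enhanced.shape)             # torch.Size([4, 257, 100])
```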
hanfengzhai/DARPA-FFT | [
"61705c1dcbe7a75a54003db5e8f7db3717e3040c"
] | [
"code/id53.py"
] | [
"import time\nfrom scipy import fftpack\nimport book_format\nbook_format.set_style()\nimport kf_book.kf_internal as kf_internal\nfrom kf_book.kf_internal import DogSimulation\nfrom kf_book import book_plots as book_plots\nimport numpy as np\nfrom matplotlib import pyplot\nimport scipy.io\nimport pandas as pd\nimport pandas_datareader as pdr\nimport seaborn as sns\nfrom pykrige.ok import OrdinaryKriging\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nsys.path.insert(0, '../../results')\n\n\njj = 50\n\nwith open('Lat_new.txt', 'r') as f1:\n data1 = f1.read().split(); floats1 = []\n for elem1 in data1:\n try:\n floats1.append(float(elem1))\n except ValueError:\n pass\n\nlat = np.array(data1, dtype = np.float64);lat = np.array_split(lat, 86)\nx1 = lat\n\nwith open('Long_new.txt', 'r') as f2:\n data2 = f2.read().split(); floats2 = []\n for elem2 in data2:\n try:\n floats2.append(float(elem2))\n except ValueError:\n pass\n \nlongdat = np.array(data2, dtype = np.float64);longdat = np.array_split(longdat, 86)\nx2 = longdat\n\nx = np.linspace(0, 405, 405)\nx_benchmark = np.linspace(0, 405, 405)# 550\nxpred = np.linspace(405, 750, 345)#440 - 550\ny_lat = x1[jj][0:405]\ny_long = x2[jj][0:405]\n# y_benchmark = x1[jj][0:550]\n\ny_fft_lat = fftpack.dct(y_lat, norm=\"ortho\")\ny_fft_lat[5:] = 0\ny_filter_lat = fftpack.idct(y_fft_lat, norm=\"ortho\")\n\ny_fft_long = fftpack.dct(y_long, norm=\"ortho\")\ny_fft_long[5:] = 0\ny_filter_long = fftpack.idct(y_fft_long, norm=\"ortho\")\n\n\nt_lat = time.time()\n\nuk_fourier_lat = OrdinaryKriging(\n x, np.zeros(x.shape), y_filter_lat, variogram_model=\"power\"#, exact_values=False\n)\ny_fft_pred_lat, y_fft_std_lat = uk_fourier_lat.execute(\"grid\", xpred, np.array([0.0]), backend=\"loop\")\n\ntime_fourierkriging_lat = time.time() - t_lat\n\n\nuk_lat = OrdinaryKriging(\n x, np.zeros(x.shape), y_lat, variogram_model=\"power\"#, exact_values=False\n)\ny_pred_lat, y_std_lat = uk_lat.execute(\"grid\", xpred, np.array([0.0]), backend=\"loop\")\n\ntime_kriging_lat = time.time() - t_lat\n\n\nt_long = time.time()\n\nuk_fourier_long = OrdinaryKriging(\n x, np.zeros(x.shape), y_filter_long, variogram_model=\"power\"#, exact_values=False\n)\ny_fft_pred_long, y_fft_std_long = uk_fourier_long.execute(\"grid\", xpred, np.array([0.0]), backend=\"loop\")\n\ntime_fourierkriging_long = time.time() - t_long\n\n\nuk_long = OrdinaryKriging(\n x, np.zeros(x.shape), y_long, variogram_model=\"power\"#, exact_values=False\n)\ny_pred_long, y_std_long = uk_long.execute(\"grid\", xpred, np.array([0.0]), backend=\"loop\")\n\ntime_kriging_long = time.time() - t_long\n\n\ny_pred_lat = np.squeeze(y_pred_lat)\ny_std_lat = np.squeeze(y_std_lat)\ny_fft_pred_lat = np.squeeze(y_fft_pred_lat)\ny_fft_std_lat = np.squeeze(y_fft_std_lat)\n\ny_pred_long = np.squeeze(y_pred_long)\ny_std_long = np.squeeze(y_std_long)\ny_fft_pred_long = np.squeeze(y_fft_pred_long)\ny_fft_std_long = np.squeeze(y_fft_std_long)\n\n\ndat_24_lat = y_fft_pred_lat[135:161]\ndat_26_lat = y_fft_pred_lat[184:207]\ndat_28_lat = y_fft_pred_lat[230:253]\ndat_30_lat = y_fft_pred_lat[276:299]\ndat_2_lat = y_fft_pred_lat[322:345]\n\ndat_24_long = y_fft_pred_long[135:161]\ndat_26_long = y_fft_pred_long[184:207]\ndat_28_long = y_fft_pred_long[230:253]\ndat_30_long = y_fft_pred_long[276:299]\ndat_2_long = y_fft_pred_long[322:345]\n\n# =====================================\n\npred_24_lat = np.mean(dat_24_lat)\npred_26_lat = np.mean(dat_26_lat)\npred_28_lat = np.mean(dat_28_lat)\npred_30_lat = np.mean(dat_30_lat)\npred_2_lat = 
np.mean(dat_2_lat)\n\npred_24_long = np.mean(dat_24_long)\npred_26_long = np.mean(dat_26_long)\npred_28_long = np.mean(dat_28_long)\npred_30_long = np.mean(dat_30_long)\npred_2_long = np.mean(dat_2_long)\n\n# ========SAVE FINAL DATA PREDICTION=========\n\nfinal_pred = [[pred_24_lat, pred_26_lat, pred_28_lat, pred_30_lat, pred_2_lat],[pred_24_long, pred_26_long, pred_28_long, pred_30_long, pred_2_long]]\n\nnp.savetxt(('id'+str(jj)+'.txt'),final_pred)\n"
] | [
[
"numpy.squeeze",
"numpy.zeros",
"scipy.fftpack.dct",
"scipy.fftpack.idct",
"numpy.array_split",
"numpy.array",
"numpy.linspace",
"numpy.mean"
]
] |
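The script above denoises each trajectory by keeping only the first five DCT coefficients before kriging. The filtering step in isolation, on a synthetic signal (the noise model here is an assumption):

```python
import numpy as np
from scipy import fftpack

# Low-pass smoothing: zero out all but the first 5 DCT coefficients.
t = np.linspace(0, 405, 405)
y = np.sin(t / 40.0) + 0.3 * np.random.randn(t.size)  # synthetic noisy track

coeffs = fftpack.dct(y, norm="ortho")
coeffs[5:] = 0                      # discard high-frequency content
y_smooth = fftpack.idct(coeffs, norm="ortho")
print(y_smooth.shape)               # (405,) low-pass filtered signal
```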
miyuush/AtCoder | [
"9481f15b69b99f56334a623f5a63dbb5e6359522"
] | [
"contest/20190303_ABC120/D.py"
] | [
"# Union-findを使う\n# 論文検索には(Disjoint Set)\n\n\nfrom scipy.special import comb\n\nn, m = map(int, input().split())\na = []\nb = []\nfor i in range(m):\n a0, b0 = [int(i) for i in input().split()]\n a.append(a0)\n b.append(b0)\n\na.append(b)\nl = len(a)\ncmb = comb(m, 2, exact=True)\nbase = int(cmb / 2) + 1\n\nfor _ in range(m):\n l -= 2\n if l > base:\n print(0)\n elif l == 4:\n print(cmb-2)\n elif l == 2:\n print(cmb-1)\n elif l == 0:\n print(cmb)\n else:\n print(base)\n base += 1"
] | [
[
"scipy.special.comb"
]
] |
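The only SciPy call in the snippet above is `comb`; `exact=True` matters because it returns an exact integer rather than a float:

```python
from scipy.special import comb

print(comb(5, 2, exact=True))  # 10 (exact int)
print(comb(5, 2))              # 10.0 (floating-point approximation)
```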
peterorum/ashrae | [
"6527eb71b2102565bb71b402db700b561cea138c"
] | [
"src/001-constant.py"
] | [
"# baseline: constant 0\n# local score 4.668\n# kaggle score 4.69\n\nimport sys # pylint: disable=unused-import\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import mean_squared_error\nfrom time import time\n\nimport os\n\nis_kaggle = os.environ['HOME'] == '/tmp'\n\nzipext = '' if is_kaggle else '.zip'\n\n# load data\ntrain = pd.read_csv(f'../input/train.csv{zipext}')\ntest = pd.read_csv(f'../input/test.csv{zipext}')\n\n#-------- main\n\nstart_time = time()\n\ntarget = 'meter_reading'\n\nresult = 0\n\ntrain['predicted'] = result\n\nscore = np.sqrt(mean_squared_error(np.log1p(train[target]), np.log1p(train.predicted)))\n\nprint('score', score)\n\ntest[target] = result\n\npredictions = test[['row_id', target]]\n\npredictions.to_csv('submission.csv', index=False)\n\nprint('%.0f mins' % ((time() - start_time) / 60))\n"
] | [
[
"pandas.read_csv",
"numpy.log1p"
]
] |
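The local score in the constant baseline above is an RMSLE: RMSE computed over `log1p`-transformed meter readings. The metric in isolation, with made-up values:

```python
import numpy as np
from sklearn.metrics import mean_squared_error

y_true = np.array([0.0, 10.0, 100.0])   # hypothetical meter readings
y_pred = np.zeros_like(y_true)          # the constant-zero baseline

# log1p handles the zero readings; the sqrt turns MSE into RMSE.
rmsle = np.sqrt(mean_squared_error(np.log1p(y_true), np.log1p(y_pred)))
print(rmsle)
```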
Danielhp95/Regym | [
"f0f0be0ad23bf1a3410ecd9ed9b8025947d6080a"
] | [
"regym/rl_algorithms/TQL/repeated_update_q_learning.py"
] | [
"import numpy as np\n\n\nclass RepeatedUpdateQLearningAlgorithm():\n '''\n Repeated Update Q Learning (RUQL) as introduced in:\n \"Addressing the Policy Bias of Q-Learning by Repeating Updates\" - Sherief Abdallah, Michael Kaisers\n '''\n def __init__(self, state_space_size, action_space_size, hashing_function, discount_factor, learning_rate, temperature):\n self.Q_table = np.zeros((state_space_size, action_space_size), dtype=np.float64)\n self.learning_rate = learning_rate\n self.hashing_function = hashing_function\n self.temperature = temperature\n self.discount_factor = discount_factor\n\n def update_q_table(self, s, a, r, succ_s):\n s, succ_s = self.hashing_function(s), self.hashing_function(succ_s)\n probability_taking_action_a = self.boltzman_exploratory_policy_from_state(s)[a]\n x = (1 - self.learning_rate)**(1 / probability_taking_action_a)\n self.Q_table[s, a] = x * self.Q_table[s, a] + (1 - x) * (r + self.discount_factor * max(self.Q_table[succ_s, :]))\n\n def boltzman_exploratory_policy_from_state(self, s):\n exp_q_values = np.exp([self.Q_table[s, i] / self.temperature for i in range(self.Q_table.shape[1])])\n normalizing_constant = sum(exp_q_values)\n return np.divide(exp_q_values, normalizing_constant)\n\n def find_moves(self, state, exploration):\n state = self.hashing_function(state)\n if exploration:\n p = self.boltzman_exploratory_policy_from_state(state)\n return np.random.choice(range(self.Q_table.shape[1]), p=p)\n else:\n optimal_moves = np.argwhere(self.Q_table[state, :] == np.amax(self.Q_table[state, :]))\n return np.random.choice(optimal_moves.flatten().tolist())\n"
] | [
[
"numpy.amax",
"numpy.divide",
"numpy.zeros"
]
] |
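The core of `update_q_table` above is the RUQL trick: raising `(1 - learning_rate)` to the power `1 / pi(a|s)` compresses `1/pi` repeated Q-learning updates into one. A numeric sketch with assumed values:

```python
# One RUQL step: the effective learning rate grows as the probability
# of the updated action shrinks.
alpha, pi = 0.1, 0.25
x = (1 - alpha) ** (1 / pi)         # 0.9**4 = 0.6561
q_old, td_target = 0.0, 1.0
q_new = x * q_old + (1 - x) * td_target
print(q_new)  # 0.3439 -- a larger step than plain Q-learning's 0.1
```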
PandoraLS/WG-WaveNet | [
"5f27e61cc4d3554af8c16fa35345831099b703e8"
] | [
"model/loss.py"
] | [
"import torch\nimport librosa\nimport numpy as np\nimport torch.nn.functional as F\nfrom hparams import hparams as hps\nfrom utils.util import to_arr, mode\n\n\nclass Loss(torch.nn.Module):\n\tdef __init__(self):\n\t\tsuper(Loss, self).__init__()\n\t\tself.d = 2*hps.sigma*hps.sigma\n\t\tself.loss = MultiResolutionSTFTLoss(hps.fft_sizes, hps.hop_sizes,\n\t\t\t\t\t\t\t\t\t\t\thps.win_lengths, hps.mel_scales)\n\n\tdef forward(self, model_output, p_wavs = None, r_wavs = None):\n\t\t# zloss\n\t\tz, log_s_list, log_w_list = model_output\n\t\tlog_s_total = 0\n\t\tlog_w_total = 0\n\t\tfor i, log_s in enumerate(log_s_list):\n\t\t\tlog_s_total += torch.sum(log_s)\n\t\t\tlog_w_total += torch.sum(log_w_list[i])\n\t\tzloss = torch.sum(z*z)/self.d-log_s_total-log_w_total\n\t\tzloss /= (z.size(0)*z.size(1)*z.size(2))\n\t\t\n\t\t# sloss\n\t\tsloss = self.loss(p_wavs, r_wavs) if p_wavs is not None else 0*zloss\n\n\t\treturn zloss+sloss, zloss, sloss\n\n\nclass MultiResolutionSTFTLoss(torch.nn.Module):\n\t# ref: https://github.com/kan-bayashi/ParallelWaveGAN\n\t\"\"\"Multi resolution STFT loss module.\"\"\"\n\tdef __init__(self,\n\t\t\t\t fft_sizes=[1024, 2048, 512],\n\t\t\t\t hop_sizes=[120, 240, 50],\n\t\t\t\t win_lengths=[600, 1200, 240],\n\t\t\t\t mel_scales=[1, 1, 1],\n\t\t\t\t window=\"hann_window\"):\n\t\t\"\"\"Initialize Multi resolution STFT loss module.\n\n\t\tArgs:\n\t\t\tfft_sizes (list): List of FFT sizes.\n\t\t\thop_sizes (list): List of hop sizes.\n\t\t\twin_lengths (list): List of window lengths.\n\t\t\twindow (str): Window function type.\n\n\t\t\"\"\"\n\t\tsuper(MultiResolutionSTFTLoss, self).__init__()\n\t\tassert len(fft_sizes) == len(hop_sizes) == len(win_lengths)\n\t\tself.stft_losses = torch.nn.ModuleList()\n\t\tself.bases = []\n\t\tfor fs, ss, wl, sc in zip(fft_sizes, hop_sizes, win_lengths, mel_scales):\n\t\t\tself.stft_losses += [STFTLoss(fs, ss, wl, window)]\n\t\t\tb = librosa.filters.mel(hps.sample_rate, fs, n_mels = hps.num_mels*sc, fmax = hps.fmax).T\n\t\t\tself.bases += [mode(torch.Tensor(b))]\n\n\tdef forward(self, x, y):\n\t\t\"\"\"Calculate forward propagation.\n\n\t\tArgs:\n\t\t\tx (Tensor): Predicted signal (B, T).\n\t\t\ty (Tensor): Groundtruth signal (B, T).\n\n\t\tReturns:\n\t\t\tTensor: Multi resolution spectral convergence loss value.\n\t\t\tTensor: Multi resolution log spectral loss value.\n\n\t\t\"\"\"\n\t\tsc_loss = 0.0\n\t\tspec_loss = 0.0\n\t\tfor f, b in zip(self.stft_losses, self.bases):\n\t\t\tsc_l, spec_l = f(x, y, b)\n\t\t\tsc_loss += sc_l\n\t\t\tspec_loss += spec_l\n\t\tsc_loss /= len(self.stft_losses)\n\t\tspec_loss /= len(self.stft_losses)\n\n\t\treturn sc_loss+spec_loss\n\n\nclass STFTLoss(torch.nn.Module):\n\t\"\"\"STFT loss module.\"\"\"\n\n\tdef __init__(self, fft_size=1024, shift_size=120, win_length=600, window=\"hann_window\"):\n\t\t\"\"\"Initialize STFT loss module.\"\"\"\n\t\tsuper(STFTLoss, self).__init__()\n\t\tself.fft_size = fft_size\n\t\tself.shift_size = shift_size\n\t\tself.win_length = win_length\n\t\tself.window = mode(getattr(torch, window)(win_length))\n\n\tdef forward(self, x, y, b):\n\t\t\"\"\"Calculate forward propagation.\n\n\t\tArgs:\n\t\t\tx (Tensor): Predicted signal (B, T).\n\t\t\ty (Tensor): Groundtruth signal (B, T).\n\t\t\tb (Tensor): Mel basis (fft_size//2+1, num_mels).\n\n\t\tReturns:\n\t\t\tTensor: Spectral convergence loss value.\n\t\t\tTensor: Log STFT magnitude loss value.\n\n\t\t\"\"\"\n\t\tx_mag, x_mel = stft(x, self.fft_size, self.shift_size, self.win_length, self.window, b)\n\t\ty_mag, y_mel = stft(y, 
self.fft_size, self.shift_size, self.win_length, self.window, b)\n\t\tsc_loss = spec_loss = 0\n\t\tif hps.mag:\n\t\t\th = x_mag.size(2)*2*hps.fmax//hps.sample_rate if hps.sample_rate >= 2*hps.fmax else x_mag.size(2)\n\t\t\tx_mag_ = x_mag[:, :, :h]\n\t\t\ty_mag_ = y_mag[:, :, :h]\n\t\t\tsc_loss += torch.norm((y_mag_-x_mag_), p = \"fro\")/torch.norm(y_mag_, p = \"fro\")\n\t\t\tspec_loss += torch.nn.L1Loss()(torch.log(x_mag_), torch.log(y_mag_))\n\t\t\tif h < x_mag.size(2):\n\t\t\t\tx_mag_m = x_mag[:, :, h:].mean(1)\n\t\t\t\ty_mag_m = y_mag[:, :, h:].mean(1)\n\t\t\t\tsc_loss += torch.norm((y_mag_m-x_mag_m), p = \"fro\")/torch.norm(y_mag_m, p = \"fro\")\n\t\t\t\tspec_loss += torch.nn.L1Loss()(torch.log(x_mag_m), torch.log(y_mag_m))\n\t\tif hps.mel:\n\t\t\tsc_loss += torch.norm((y_mel-x_mel), p = \"fro\")/torch.norm(y_mel, p = \"fro\")\n\t\t\tspec_loss += torch.nn.L1Loss()(torch.log(x_mel), torch.log(y_mel))\n\t\ts = int(hps.mag)+int(hps.mel)\n\t\tif s == 0:\n\t\t\tprint('Error: hps.mag and hps.mel are both set as False.')\n\t\t\texit()\n\t\treturn sc_loss/s, spec_loss/s\n\n\ndef stft(x, fft_size, hop_size, win_length, window, b):\n\t\"\"\"Perform STFT and convert to magnitude spectrogram.\n\n\tArgs:\n\t\tx (Tensor): Input signal tensor (B, T).\n\t\tfft_size (int): FFT size.\n\t\thop_size (int): Hop size.\n\t\twin_length (int): Window length.\n\t\twindow (str): Window function type.\n\t\tb (Tensor): Mel basis (fft_size//2+1, num_mels).\n\n\tReturns:\n\t\tTensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1).\n\n\t\"\"\"\n\tx_stft = torch.stft(x, fft_size, hop_size, win_length, window)\n\treal = x_stft[..., 0]\n\timag = x_stft[..., 1]\n\n\t# NOTE(kan-bayashi): clamp is needed to avoid nan or inf\n\tmag = torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1)\n\treturn mag, torch.clamp(torch.matmul(mag, b), min = 1e-7**0.5)\n\n"
] | [
[
"torch.sum",
"torch.Tensor",
"torch.clamp",
"torch.nn.L1Loss",
"torch.norm",
"torch.log",
"torch.nn.ModuleList",
"torch.stft",
"torch.matmul"
]
] |
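`STFTLoss.forward` above combines two terms per resolution: spectral convergence (a relative Frobenius-norm error) and a log-magnitude L1 loss. Both in isolation on random magnitudes; the shapes and the `1e-7` floor are assumptions mirroring the clamp in `stft`:

```python
import torch

x_mag = torch.rand(2, 100, 513) + 1e-7   # predicted magnitudes (B, frames, bins)
y_mag = torch.rand(2, 100, 513) + 1e-7   # ground-truth magnitudes

# Spectral convergence: ||Y - X||_F / ||Y||_F.
sc_loss = torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro")
# Log STFT magnitude loss: L1 distance between log spectrograms.
log_mag_loss = torch.nn.L1Loss()(torch.log(x_mag), torch.log(y_mag))
print(sc_loss.item(), log_mag_loss.item())
```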
kanga333/PyAthena | [
"487baa66ae203c3541d37191600f1f3219a2e1ac"
] | [
"pyathena/util.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport functools\nimport logging\nimport threading\nimport re\nimport uuid\n\nimport tenacity\nfrom past.builtins import xrange\nfrom tenacity import (after_log, retry_if_exception,\n stop_after_attempt, wait_exponential)\n\nfrom pyathena import DataError, OperationalError\nfrom pyathena.model import AthenaCompression\n\n_logger = logging.getLogger(__name__)\n\nPATTERN_OUTPUT_LOCATION = re.compile(r'^s3://(?P<bucket>[a-zA-Z0-9.\\-_]+)/(?P<key>.+)$')\n\n\ndef parse_output_location(output_location):\n match = PATTERN_OUTPUT_LOCATION.search(output_location)\n if match:\n return match.group('bucket'), match.group('key')\n else:\n raise DataError('Unknown `output_location` format.')\n\n\ndef get_chunks(df, chunksize=None):\n rows = len(df)\n if rows == 0:\n return\n if chunksize is None:\n chunksize = rows\n elif chunksize <= 0:\n raise ValueError('Chunk size argument must be greater than zero')\n\n chunks = int(rows / chunksize) + 1\n for i in xrange(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, rows)\n if start_i >= end_i:\n break\n yield df[start_i:end_i]\n\n\ndef reset_index(df, index_label=None):\n df.index.name = index_label if index_label else 'index'\n try:\n df.reset_index(inplace=True)\n except ValueError as e:\n raise ValueError('Duplicate name in index/columns: {0}'.format(e))\n\n\ndef as_pandas(cursor, coerce_float=False):\n from pandas import DataFrame\n names = [metadata[0] for metadata in cursor.description]\n return DataFrame.from_records(cursor.fetchall(), columns=names,\n coerce_float=coerce_float)\n\n\ndef to_sql_type_mappings(col):\n import pandas as pd\n col_type = pd._lib.infer_dtype(col, skipna=True)\n if col_type == 'datetime64' or col_type == 'datetime':\n return 'TIMESTAMP'\n elif col_type == 'timedelta':\n return 'INT'\n elif col_type == \"timedelta64\":\n return 'BIGINT'\n elif col_type == 'floating':\n if col.dtype == 'float32':\n return 'FLOAT'\n else:\n return 'DOUBLE'\n elif col_type == 'integer':\n if col.dtype == 'int32':\n return 'INT'\n else:\n return 'BIGINT'\n elif col_type == 'boolean':\n return 'BOOLEAN'\n elif col_type == \"date\":\n return 'DATE'\n elif col_type == 'bytes':\n return 'BINARY'\n elif col_type in ['complex', 'time']:\n raise ValueError('{0} datatype not supported'.format(col_type))\n return 'STRING'\n\n\ndef to_sql(df, name, conn, location, schema='default',\n index=False, index_label=None, chunksize=None,\n if_exists='fail', compression=None, flavor='spark',\n type_mappings=to_sql_type_mappings):\n # TODO Supports orc, avro, json, csv or tsv format\n # TODO Supports partitioning\n if if_exists not in ('fail', 'replace', 'append'):\n raise ValueError('`{0}` is not valid for if_exists'.format(if_exists))\n if compression is not None and not AthenaCompression.is_valid(compression):\n raise ValueError('`{0}` is not valid for compression'.format(compression))\n\n import pyarrow as pa\n import pyarrow.parquet as pq\n bucket_name, key_prefix = parse_output_location(location)\n bucket = conn.session.resource('s3', region_name=conn.region_name,\n **conn._client_kwargs).Bucket(bucket_name)\n cursor = conn.cursor()\n retry_config = conn.retry_config\n\n table = cursor.execute(\"\"\"\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema = '{schema}'\n AND table_name = '{table}'\n \"\"\".format(schema=schema, table=name)).fetchall()\n if if_exists == 'fail':\n if table:\n raise 
OperationalError('Table `{0}.{1}` already exists.'.format(schema, name))\n elif if_exists == 'replace':\n if table:\n cursor.execute(\"\"\"\n DROP TABLE {schema}.{table}\n \"\"\".format(schema=schema, table=name))\n objects = bucket.objects.filter(Prefix=key_prefix)\n if list(objects.limit(1)):\n objects.delete()\n\n if index:\n reset_index(df, index_label)\n for chunk in get_chunks(df, chunksize):\n table = pa.Table.from_pandas(chunk)\n buf = pa.BufferOutputStream()\n pq.write_table(table, buf,\n compression=compression,\n flavor=flavor)\n retry_api_call(bucket.put_object,\n config=retry_config,\n Body=buf.getvalue().to_pybytes(),\n Key=key_prefix + str(uuid.uuid4()))\n\n ddl = generate_ddl(df=df,\n name=name,\n location=location,\n schema=schema,\n compression=compression,\n type_mappings=type_mappings)\n cursor.execute(ddl)\n\n\ndef get_column_names_and_types(df, type_mappings):\n return [\n (str(df.columns[i]), type_mappings(df.iloc[:, i]))\n for i in xrange(len(df.columns))\n ]\n\n\ndef generate_ddl(df, name, location, schema='default', compression=None,\n type_mappings=to_sql_type_mappings):\n ddl = 'CREATE EXTERNAL TABLE IF NOT EXISTS `{0}`.`{1}` (\\n'.format(schema, name)\n ddl += ',\\n'.join([\n '`{0}` {1}'.format(c[0], c[1])\n for c in get_column_names_and_types(df, type_mappings)\n ])\n ddl += '\\n)\\n'\n ddl += 'STORED AS PARQUET\\n'\n ddl += \"LOCATION '{0}'\\n\".format(location)\n if compression:\n ddl += \"TBLPROPERTIES ('parquet.compress'='{0}')\\n\".format(compression.upper())\n return ddl\n\n\ndef synchronized(wrapped):\n \"\"\"The missing @synchronized decorator\n\n https://git.io/vydTA\"\"\"\n _lock = threading.RLock()\n\n @functools.wraps(wrapped)\n def _wrapper(*args, **kwargs):\n with _lock:\n return wrapped(*args, **kwargs)\n return _wrapper\n\n\nclass RetryConfig(object):\n\n def __init__(self, exceptions=('ThrottlingException', 'TooManyRequestsException'),\n attempt=5, multiplier=1, max_delay=100, exponential_base=2):\n self.exceptions = exceptions\n self.attempt = attempt\n self.multiplier = multiplier\n self.max_delay = max_delay\n self.exponential_base = exponential_base\n\n\ndef retry_api_call(func, config, logger=None,\n *args, **kwargs):\n retry = tenacity.Retrying(\n retry=retry_if_exception(\n lambda e: getattr(e, 'response', {}).get(\n 'Error', {}).get('Code', None) in config.exceptions\n if e else False),\n stop=stop_after_attempt(config.attempt),\n wait=wait_exponential(multiplier=config.multiplier,\n max=config.max_delay,\n exp_base=config.exponential_base),\n after=after_log(logger, logger.level) if logger else None,\n reraise=True\n )\n return retry(func, *args, **kwargs)\n"
] | [
[
"pandas._lib.infer_dtype"
]
] |
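`get_chunks` above drives the chunked Parquet upload inside `to_sql`. A simplified re-implementation sketch (not PyAthena's exact code) showing the slicing behavior:

```python
import pandas as pd

# Slice a DataFrame into consecutive row blocks of at most `chunksize` rows.
def get_chunks(df, chunksize):
    rows = len(df)
    for start in range(0, rows, chunksize):
        yield df[start:min(start + chunksize, rows)]

df = pd.DataFrame({"x": range(10)})
print([len(c) for c in get_chunks(df, 4)])   # [4, 4, 2]
```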
1170300521/StyleGAN-nada | [
"1b6dc2d7dcbc37dd2e29af2f8b59d7635e6a26ec"
] | [
"ZSSGAN/utils/svm.py"
] | [
"import numpy as np\nfrom sklearn import svm\n\n\ndef train_boundary(pos_codes, neg_codes, split_ratio=0.7):\n pos_ids = np.arange(len(pos_codes))\n np.random.shuffle(pos_ids)\n train_pos_num = int(len(pos_ids) * split_ratio)\n train_pos_codes = pos_codes[pos_ids[:train_pos_num]]\n val_pos_codes = pos_codes[pos_ids[train_pos_num:]]\n\n neg_ids = np.arange(len(neg_codes))\n np.random.shuffle(neg_ids)\n train_neg_num = int(len(neg_ids) * split_ratio)\n train_neg_codes = neg_codes[neg_ids[:train_neg_num]]\n val_neg_codes = neg_codes[neg_ids[train_neg_num:]]\n\n train_data = np.concatenate([train_pos_codes, train_neg_codes], axis=0)\n train_label = np.concatenate([np.ones(train_pos_num, dtype=np.int),\n np.zeros(train_neg_num, dtype=np.int)], axis=0)\n print(f'Training: {train_pos_num} positive, {train_neg_num} negtive.')\n\n val_data = np.concatenate([val_pos_codes, val_neg_codes], axis=0)\n val_label = np.concatenate([np.ones(len(val_pos_codes)),\n np.zeros(len(val_neg_codes))], axis=0)\n print(f'Validation: {len(val_pos_codes)} positive, {len(val_neg_codes)} negtive.')\n\n clf = svm.SVC(kernel='linear')\n classifier = clf.fit(train_data, train_label)\n\n if len(val_label) > 0:\n val_pred = classifier.predict(val_data)\n correct_num = np.sum(val_label == val_pred)\n print(f'Accurracy for validattion set: {correct_num} / {len(val_label)} = {correct_num / len(val_label):.6f}.')\n \n a = classifier.coef_.reshape(1, pos_codes.shape[1]).astype(np.float32)\n\n # Specific for initialization of dynamic svm\n if split_ratio == 1:\n return np.concatenate([a, [classifier.intercept_.astype(np.float)]], axis=-1)\n return a / np.linalg.norm(a)\n\ndef get_delta_w(pos_path, output_path, delta_w_type='svm', args=None,\\\n neg_path=\"/home/ybyb/CODE/StyleGAN-nada/results/invert/A_gen_w.npy\"):\n pos_codes = np.load(pos_path).reshape((-1, 18, 512))[:, 0:(18-args.num_mask_last)]\n neg_codes = np.load(neg_path).reshape((-1, 18, 512))[:, 0:(18-args.num_mask_last)]\n chosen_num = min(500, len(neg_codes))\n pos_num = min(10000, len(pos_codes))\n # np.save(\"/home/ybyb/CODE/StyleGAN-nada/results/demo_ffhq/photo+Image_1/test/mean_delta_w.npy\", (pos_codes.mean(0) - neg_codes.mean(0)))\n np.random.shuffle(pos_codes)\n np.random.shuffle(neg_codes)\n pos_codes = pos_codes[0:pos_num].reshape((pos_num, -1))\n neg_codes = neg_codes[0:chosen_num].reshape((chosen_num, -1))\n if delta_w_type == 'svm':\n a = train_boundary(pos_codes, neg_codes, split_ratio=0.7)\n elif delta_w_type == 'mean':\n a = pos_codes.mean(0) - neg_codes.mean(0)\n a = a / np.linalg.norm(a)\n else:\n raise RuntimeError(f\"No type namely {delta_w_type}!\")\n tmp = np.zeros((18, 512))\n tmp[0:(18-args.num_mask_last)] = a.reshape((-1, 512))\n np.save(output_path, tmp)\n\nif __name__ == \"__main__\":\n pos_path = \"/home/ybyb/CODE/StyleGAN-nada/results/demo_ffhq/photo+Image_1/test/B_codes.npy\"\n # neg_path = \"/home/ybyb/CODE/StyleGAN-nada/results/invert/ffhq_w+.npy\"\n neg_path = \"/home/ybyb/CODE/StyleGAN-nada/results/demo_ffhq/photo+Image_1/test/A_codes.npy\"\n output_path = \"/home/ybyb/CODE/StyleGAN-nada/results/demo_ffhq/photo+Image_1/test/small_delta_w.npy\"\n get_delta_w(pos_path, neg_path, output_path)"
] | [
[
"numpy.save",
"numpy.sum",
"numpy.random.shuffle",
"numpy.ones",
"sklearn.svm.SVC",
"numpy.zeros",
"numpy.load",
"numpy.concatenate",
"numpy.linalg.norm"
]
] |
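`train_boundary` above reduces to: fit a linear SVM on positive vs. negative latent codes and take the unit-normalized normal vector of the separating hyperplane as the edit direction. A self-contained sketch on synthetic codes (the sizes and offsets are assumptions):

```python
import numpy as np
from sklearn import svm

rng = np.random.RandomState(0)
pos = rng.randn(100, 512) + 1.0   # stand-in "positive" latent codes
neg = rng.randn(100, 512) - 1.0   # stand-in "negative" latent codes

clf = svm.SVC(kernel="linear").fit(
    np.concatenate([pos, neg]),
    np.concatenate([np.ones(100), np.zeros(100)]))
a = clf.coef_.reshape(1, -1).astype(np.float32)
direction = a / np.linalg.norm(a)   # the unit edit direction used as delta_w
print(direction.shape)              # (1, 512)
```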
Giuseppe5/pytorch-ocr | [
"f8e89295e911c7a3eec6e3aa13335c031cd3adfe"
] | [
"main.py"
] | [
"# Copyright (c) 2018, Xilinx, Inc.\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without \n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, \n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright \n# notice, this list of conditions and the following disclaimer in the \n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its \n# contributors may be used to endorse or promote products derived from \n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, \n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR \n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR \n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, \n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, \n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR \n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF \n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nimport argparse\nimport json\nimport os\n\nimport torch\nimport numpy as np\nfrom ocr import PytorchOCRTrainer\n\ntorch.backends.cudnn.enabled = False\ntorch.set_printoptions(precision=10)\n\nclass objdict(dict):\n def __getattr__(self, name):\n if name in self:\n return self[name]\n else:\n raise AttributeError(\"No such attribute: \" + name)\n\n def __setattr__(self, name, value):\n self[name] = value\n\n def __delattr__(self, name):\n if name in self:\n del self[name]\n else:\n raise AttributeError(\"No such attribute: \" + name)\n\ndef ascii_encode_dict(data):\n ascii_encode = lambda x: x.encode('ascii')\n return dict(map(ascii_encode, pair) if isinstance(pair[1], unicode) else pair for pair in data.items())\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='OCR training')\n parser.add_argument('--params', '-p', default=\"default_trainer_params.json\", help='Path to params JSON file. Default ignored when resuming.')\n parser.add_argument('--experiments', '-e', default=\"experiments\", help='Path for experiments. 
Ignored when resuming.')\n parser.add_argument('--input', '-i', help='Path to input checkpoint.')\n parser.add_argument('--pretrained_policy', default=\"RESUME\", help='RESUME/RETRAIN.')\n parser.add_argument('--init_bn_fc_fusion', default=False, action='store_true', help='Init BN FC fusion.')\n parser.add_argument('--eval', default=False, action='store_true', help='Perform only evaluation on val dataset.')\n parser.add_argument('--export', default=False, action='store_true', help='Perform only export of quantized weights.')\n parser.add_argument('--no_cuda', default=False, action='store_true', help='Run on CPU.')\n parser.add_argument('--export_test_image', default=False, action='store_true', help='Export pre-quantized and reshaped test image.')\n parser.add_argument('--valid', default=\"db_files_uw3-500/valid.txt\", help='Input path for val file.')\n parser.add_argument('--sortedtrain', default=\"db_files_uw3-500/sortedTrain.txt\", help='Input path for train file.')\n parser.add_argument('--imgs', default=\"db_files_uw3-500/imgs\", help='Input path for images dir.')\n parser.add_argument('--dry_run', default=False, action='store_true', help='Do not write any output file.')\n parser.add_argument('--simd_factor', default=1, type=int, help='SIMD factor for export.')\n parser.add_argument('--pe', default=1, type=int, help='Number of PEs for export.')\n\n #Overrides\n parser.add_argument('--random_seed', type=int)\n parser.add_argument('--batch_size', type=int)\n parser.add_argument('--num_workers', type=int)\n parser.add_argument('--layer_size', type=int)\n parser.add_argument('--neuron_type', type=str)\n parser.add_argument('--target_height', type=int)\n parser.add_argument('--epochs', type=int)\n parser.add_argument('--lr', type=float)\n parser.add_argument('--lr_schedule', type=str)\n parser.add_argument('--lr_step', type=int)\n parser.add_argument('--lr_gamma', type=float)\n parser.add_argument('--max_norm', type=float)\n parser.add_argument('--seq_to_random_threshold', type=int)\n parser.add_argument('--bidirectional', type=bool)\n parser.add_argument('--reduce_bidirectional', type=str)\n parser.add_argument('--recurrent_bias_enabled', type=bool)\n parser.add_argument('--checkpoint_interval', type=int)\n parser.add_argument('--recurrent_weight_bit_width', type=int)\n parser.add_argument('--recurrent_weight_quantization', type=str)\n parser.add_argument('--recurrent_bias_bit_width', type=int)\n parser.add_argument('--recurrent_bias_quantization', type=str)\n parser.add_argument('--recurrent_activation_bit_width', type=int)\n parser.add_argument('--recurrent_activation_quantization', type=str)\n parser.add_argument('--internal_activation_bit_width', type=int)\n parser.add_argument('--fc_weight_bit_width', type=int)\n parser.add_argument('--fc_weight_quantization', type=str)\n parser.add_argument('--fc_bias_bit_width', type=int)\n parser.add_argument('--fc_bias_quantization', type=str)\n parser.add_argument('--quantize_input', type=bool)\n parser.add_argument('--mask_padded', type=bool)\n\n args = parser.parse_args()\n\n #Set paths relative to main.py\n path_args = ['params', 'experiments', 'input', 'valid', 'sortedtrain', 'imgs']\n for path_arg in path_args:\n path = getattr(args, path_arg)\n if path is not None and not os.path.isabs(path):\n abs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), path))\n setattr(args, path_arg, abs_path)\n\n #Avoid creating new folders etc. 
\n if args.eval or args.export or args.export_test_image:\n args.dry_run = True\n\n #force cpu when exporting weights\n if args.export or args.export_test_image:\n args.no_cuda = True\n\n if args.input and args.pretrained_policy == \"RESUME\" and args.params == \"default_trainer_params.json\":\n package = torch.load(args.input, map_location=lambda storage, loc: storage)\n trainer_params = package['trainer_params']\n else:\n with open(args.params) as d:\n trainer_params = json.load(d, object_hook=ascii_encode_dict)\n trainer_params = objdict(trainer_params)\n\n #Overrides\n if args.epochs is not None:\n trainer_params.epochs = args.epochs\n if args.internal_activation_bit_width is not None:\n trainer_params.internal_activation_bit_width = args.internal_activation_bit_width\n\n trainer = PytorchOCRTrainer(trainer_params, args)\n\n if args.export_test_image:\n trainer.export_test_image(trainer_params.target_height)\n exit(0)\n\n if args.export:\n trainer.export_model(args.simd_factor, args.pe)\n exit(0)\n\n if args.eval:\n trainer.eval_model()\n else:\n trainer.train_model()\n\n\n\n\n\n\n"
] | [
[
"torch.set_printoptions",
"torch.load"
]
] |
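The `objdict` helper in the OCR training script above is a small, reusable pattern: it subclasses `dict` so that JSON-loaded trainer parameters can be read and written as attributes (`trainer_params.epochs`) rather than by key. A minimal, self-contained sketch of the same idea follows; the parameter names here are illustrative, not taken from the repo:

import json

# Attribute-style dict, mirroring the objdict pattern above. __getattr__ is
# only consulted when normal attribute lookup fails, which is why simply
# subclassing dict is enough.
class AttrDict(dict):
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError("No such attribute: " + name)

    def __setattr__(self, name, value):
        self[name] = value

# Hypothetical params blob, standing in for a file like
# default_trainer_params.json.
params = AttrDict(json.loads('{"epochs": 10, "lr": 0.001}'))
assert params.epochs == 10   # attribute read hits the dict
params.epochs = 20           # attribute write updates the dict in place
assert params["epochs"] == 20

One consequence of this pattern, visible in the script above, is that command-line overrides reduce to plain attribute assignments such as `trainer_params.epochs = args.epochs`.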
alsmeirelles/ResRep | [
"abc8d221cfa153de577ca1bbba515cc7abb94378"
] | [
"display_hdf5.py"
] | [
"from utils.misc import read_hdf5\nfrom utils.misc import extract_deps_from_weights_file\nimport sys\nimport numpy as np\n\nwf = sys.argv[1]\ndeps = extract_deps_from_weights_file(wf)\ndi = read_hdf5(wf)\nnum_kernel_params = 0\n\nconv_kernel_cnt = 0\nmatrix_param_cnt = 0\nvec_param_cnt = 0\n\nbias_cnt = 0\nbeta_cnt = 0\ngamma_cnt = 0\nmu_cnt = 0\nvar_cnt = 0\n\nfor name, array in di.items():\n if array.ndim in [2, 4]:\n num_kernel_params += array.size\n\n if 'base_mask' in name:\n print(name, array)\n\n print(name, array.shape, np.mean(array), np.std(array),\n ' positive {}, negative {}, zeros {}, near-zero {}'.format(np.sum(array > 0), np.sum(array < 0), np.sum(array == 0),\n np.sum(np.abs(array) <= 1e-5)))\n\n if array.ndim == 2:\n matrix_param_cnt += array.size\n elif array.ndim == 1:\n vec_param_cnt += array.size\n elif array.ndim == 4:\n conv_kernel_cnt += array.size\n if 'running_mean' in name or 'moving_mean' in name:\n mu_cnt += array.size\n elif 'running_var' in name or 'moving_var' in name:\n var_cnt += array.size\n elif ('weight' in name and 'bn' in name.lower()) or 'gamma' in name:\n gamma_cnt += array.size\n elif ('bias' in name and 'bn' in name.lower()) or 'beta' in name:\n beta_cnt += array.size\n elif 'bias' in name:\n bias_cnt += array.size\n elif 'spatial_mask' in name:\n print(array)\n print(np.sum(array))\n\nprint('number of kernel params: ', num_kernel_params)\nprint('vec {}, matrix {}, conv {}, total {}'.format(vec_param_cnt, matrix_param_cnt, conv_kernel_cnt,\n vec_param_cnt + matrix_param_cnt + conv_kernel_cnt))\nprint('mu {}, var {}, gamma {}, beta {}, bias {}'.format(mu_cnt, var_cnt, gamma_cnt, beta_cnt, bias_cnt))\n\nprint('Model deps: {}'.format(deps))\n"
] | [
[
"numpy.std",
"numpy.sum",
"numpy.abs",
"numpy.mean"
]
] |
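display_hdf5.py above sorts every array in a weights file into buckets by rank (4-D arrays are conv kernels, 2-D are matrices, 1-D are vectors) and refines the 1-D bucket by name substrings to separate batch-norm statistics from biases; the near-zero count is what reveals channels a pruner has zeroed out in a ResRep checkpoint. A small sketch of the same bookkeeping, using an in-memory dict in place of `read_hdf5` (the layer names and shapes below are made up for illustration):

import numpy as np

# Stand-in for read_hdf5(path): maps parameter name -> array.
weights = {
    'conv1.weight': np.zeros((64, 3, 3, 3)),  # 4-D: conv kernel
    'bn1.running_mean': np.zeros(64),         # 1-D: BN statistic
    'bn1.weight': np.ones(64),                # 1-D: BN gamma
    'fc.weight': np.zeros((10, 64)),          # 2-D: matrix
    'fc.bias': np.zeros(10),                  # 1-D: bias
}

conv_cnt = matrix_cnt = vec_cnt = 0
for name, array in weights.items():
    if array.ndim == 4:
        conv_cnt += array.size
    elif array.ndim == 2:
        matrix_cnt += array.size
    elif array.ndim == 1:
        vec_cnt += array.size
    # Near-zero entries hint at pruned channels, as in the script above.
    print(name, array.shape, 'near-zero:', np.sum(np.abs(array) <= 1e-5))

print('vec {}, matrix {}, conv {}'.format(vec_cnt, matrix_cnt, conv_cnt))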
xsppp/gpipe_with_Mnist | [
"5cd8aff375e7f8fc3c6fb065ce3f40854eb6f31a"
] | [
"lingvo/tasks/car/input_preprocessors.py"
] | [
"# Lint as: python3\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Input preprocessors.\"\"\"\n\nfrom lingvo import compat as tf\nfrom lingvo.core import base_layer\nfrom lingvo.core import py_utils\nfrom lingvo.core import schedule\nfrom lingvo.tasks.car import car_lib\nfrom lingvo.tasks.car import detection_3d_lib\nfrom lingvo.tasks.car import geometry\nfrom lingvo.tasks.car import ops\nimport numpy as np\n# pylint:disable=g-direct-tensorflow-import\nfrom tensorflow.python.ops import inplace_ops\n# pylint:enable=g-direct-tensorflow-import\n\n\ndef _ConsistentShuffle(tensors, seed):\n \"\"\"Shuffle multiple tensors with the same shuffle order.\"\"\"\n shuffled_idx = tf.range(tf.shape(tensors[0])[0])\n shuffled_idx = tf.random.shuffle(shuffled_idx, seed=seed)\n return tuple([tf.gather(t, shuffled_idx) for t in tensors])\n\n\ndef _GetApplyPointMaskFn(points_mask):\n \"\"\"Returns a function that applies a mask to one of our points tensors.\"\"\"\n\n def _ApplyPointMaskFn(points_tensor):\n \"\"\"Applies a mask to the points tensor.\"\"\"\n if points_tensor is None:\n return points_tensor\n return tf.boolean_mask(points_tensor, points_mask)\n\n return _ApplyPointMaskFn\n\n\ndef _Dense(sparse):\n return tf.sparse_to_dense(\n sparse_indices=sparse.indices,\n output_shape=sparse.dense_shape,\n sparse_values=sparse.values,\n default_value=0)\n\n\nclass Preprocessor(base_layer.BaseLayer):\n \"\"\"Base class for input preprocessor.\n\n Input preprocessors expect the combined output of all extractors and performs\n a transformation on them. 
Input preprocessors can add/edit/remove fields\n from the NestedMap of features.\n\n Note: Features correspond to that for one example (no batch dimension).\n\n Sub-classes need to implement the following three functions:\n\n 1) TransformFeatures(features): Given a NestedMap of features representing the\n output of all the extractors, apply a transformation on the features.\n\n 2) TransformShapes(shapes): Given a corresponding NestedMap of shapes,\n produce a NestedMap of shapes that corresponds to the transformation of the\n features after TransformFeatures.\n\n 3) TransformDTypes(dtypes): Given a corresponding NestedMap of dtypes,\n produce a NestedMap of dtypes that corresponds to the transformation of the\n features after TransformFeatures.\n\n The preprocessor is expected to explicitly pass through untouched fields.\n For example, a preprocessor that does data augmentation should modify the\n features NestedMap on the fields it cares about augmenting, and then return\n the features NestedMap.\n \"\"\"\n\n @classmethod\n def Params(cls):\n \"\"\"Default params.\"\"\"\n p = super().Params()\n p.name = cls.__name__\n return p\n\n def FProp(self, theta, features):\n \"\"\"Performs TransformFeatures.\"\"\"\n del theta # unused\n return self.TransformFeatures(features)\n\n def TransformFeatures(self, features):\n \"\"\"Transforms the features for one example.\n\n Args:\n features: A `NestedMap` of tensors.\n\n Returns:\n A `NestedMap` of tensors corresponding.\n \"\"\"\n raise NotImplementedError()\n\n def TransformShapes(self, shapes):\n \"\"\"Sets correct shapes corresponding to TransformFeatures.\n\n Args:\n shapes: A `NestedMap` of TensorShapes, corresponding to the\n pre-transformed features.\n\n Returns:\n A `NestedMap` of TensorShapes corresponding to the transformed features.\n \"\"\"\n raise NotImplementedError()\n\n def TransformDTypes(self, dtypes):\n \"\"\"Sets correct dtypes corresponding to TransformFeatures.\n\n Args:\n dtypes: A `NestedMap` of DTypes, corresponding to the pre-transformed\n features.\n\n Returns:\n A `NestedMap` of DTypes corresponding to the transformed features.\n \"\"\"\n raise NotImplementedError()\n\n\nclass EntryPreprocessor(Preprocessor):\n \"\"\"A Preprocessor that transforms a NestedMap sub-structure.\n\n Some preprocessors want to apply a function to any NestedMap whose key matches\n a specific prefix. An EntryPreprocessor provides an interface for specifying\n the function transformation for a NestedMap of inputs, adding, modifying, or\n deleting the entries in that NestedMap.\n\n For example, if an input contains a nested structure such as:\n - lasers.front.xyz\n .features\n - lasers.side.xyz\n .features\n\n and one wants to apply a transform that modifies the .xyz features\n on both structures, one can define an EntryPreprocessor that implements:\n\n UpdateEntry(entry):\n UpdateEntryShape(shapes):\n UpdateEntryDType(dtypes):\n\n and set self.params.prefixes = ['lasers.front', 'lasers.side']\n where the prefixes refer to a fully-qualified NestedMap sub-structure.\n\n The arguments to these functions will contain just the NestedMap structure\n whose key prefix can be found in self.params.prefixes. 
One can then modify\n these structures as desired.\n\n Example:\n def UpdateEntry(self, entry):\n # entry is a NestedMap.\n assert 'xyz' in entry\n entry.xyz = self._ApplyFn(entry.xyz)\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('prefixes', ['pseudo_ri'], 'List of keys to apply to.')\n return p\n\n def _ApplyToMatchingStructure(self, nested_map, fn):\n \"\"\"Apply fn to any NestedMap sub-structure whose prefix is in p.prefixes.\"\"\"\n p = self.params\n # Don't mutate the original.\n nested_map = nested_map.DeepCopy()\n updated_entries = []\n for prefix in p.prefixes:\n entry = nested_map.GetItem(prefix)\n if not isinstance(entry, py_utils.NestedMap):\n raise TypeError('Prefix key {} selected a {}, not a NestedMap!'.format(\n prefix, type(entry)))\n fn(entry)\n updated_entries.append(entry)\n return nested_map, updated_entries\n\n def UpdateEntry(self, entry):\n \"\"\"Update the Tensors in a NestedMap entry.\n\n Args:\n entry: A NestedMap of Tensors.\n \"\"\"\n raise NotImplementedError()\n\n def UpdateEntryShape(self, shapes):\n \"\"\"Update the shapes in a NestedMap entry.\n\n Args:\n shapes: A NestedMap of TensorShapes.\n \"\"\"\n raise NotImplementedError()\n\n def UpdateEntryDType(self, dtypes):\n \"\"\"Transform the dtypes in a NestedMap entry.\n\n Args:\n dtypes: A NestedMap of dtypes.\n \"\"\"\n raise NotImplementedError()\n\n def TransformFeatures(self, features):\n features, _ = self._ApplyToMatchingStructure(features, self.UpdateEntry)\n return features\n\n def TransformShapes(self, shapes):\n shapes, _ = self._ApplyToMatchingStructure(shapes, self.UpdateEntryShape)\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes, _ = self._ApplyToMatchingStructure(dtypes, self.UpdateEntryDType)\n return dtypes\n\n\nclass CreateDecoderCopy(Preprocessor):\n \"\"\"Creates references to current lasers, images, and labels.\n\n This is useful if the data is further transformed.\n\n If desired, the keys that are copied can be customized by overriding the\n default keys param.\n\n This preprocessor expects features to optionally contain the following keys:\n - lasers - a NestedMap of tensors\n - images - a NestedMap of tensors\n - labels - a NestedMap of tensors\n\n Adds the following features (if the features existed):\n - decoder_copy.lasers - a copy of the lasers NestedMap\n - decoder_copy.images - a copy of the images NestedMap\n - decoder_copy.labels - a copy of the labels NestedMap\n\n The processor also by default pads the laser features; this can be disabled\n by setting the pad_lasers param to None.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('keys', ['lasers', 'labels', 'images'],\n 'Keys to look for and copy if exists.')\n p.Define('parent_key', 'decoder_copy', 'The key to nest the copies under.')\n p.Define('pad_lasers', PadLaserFeatures.Params(),\n 'Params for a layer that pads the laser features.')\n p.name = 'create_decoder_copy'\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if p.pad_lasers is not None:\n self.CreateChild('pad_lasers', p.pad_lasers)\n\n def _DeepCopyIfExists(self, keys, nested_map, parent_key):\n \"\"\"Deep copy a specific key to a parent key if it exists.\"\"\"\n for key in keys:\n if key in nested_map:\n if parent_key not in nested_map:\n nested_map[parent_key] = py_utils.NestedMap()\n nested_map[parent_key][key] = nested_map[key].DeepCopy()\n return nested_map\n\n def TransformFeatures(self, features):\n p = self.params\n features = 
self._DeepCopyIfExists(p.keys, features, p.parent_key)\n if p.pad_lasers is not None:\n features[p.parent_key] = self.pad_lasers.TransformFeatures(\n features[p.parent_key])\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n shapes = self._DeepCopyIfExists(p.keys, shapes, p.parent_key)\n if p.pad_lasers is not None:\n shapes[p.parent_key] = self.pad_lasers.TransformShapes(\n shapes[p.parent_key])\n return shapes\n\n def TransformDTypes(self, dtypes):\n p = self.params\n dtypes = self._DeepCopyIfExists(p.keys, dtypes, p.parent_key)\n if p.pad_lasers is not None:\n dtypes[p.parent_key] = self.pad_lasers.TransformDTypes(\n dtypes[p.parent_key])\n return dtypes\n\n\nclass FilterByKey(Preprocessor):\n \"\"\"Filters features to keep only specified keys.\n\n This keeps only feature entries that are specified. This allows us to reduce\n the number of fields returned. For example, during training, one may not\n need the actual laser points if training with a pillars based model that\n has a preprocessor that already maps the points to grid.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'keep_key_prefixes', [''], 'Prefixes of keys to keep. If this '\n 'contains the empty string, then it will keep all the keys.')\n return p\n\n def _FilterFn(self, key, entry):\n \"\"\"Filter a nested map.\"\"\"\n del entry # unused\n p = self.params\n for prefix in p.keep_key_prefixes:\n if key.startswith(prefix):\n return True\n return False\n\n def TransformFeatures(self, features):\n return features.FilterKeyVal(self._FilterFn)\n\n def TransformShapes(self, shapes):\n return shapes.FilterKeyVal(self._FilterFn)\n\n def TransformDTypes(self, dtypes):\n return dtypes.FilterKeyVal(self._FilterFn)\n\n\nclass FilterGroundTruthByNumPoints(Preprocessor):\n \"\"\"Removes ground truth boxes with less than params.min_num_points points.\n\n This preprocessor expects features to contain the following keys::\n labels.labels of shape [..., L]\n labels.bboxes_3d of shape [..., L, 7]\n labels.bboxes_3d_mask of shape [..., L]\n labels.unfiltered_bboxes_3d_mask of shape [..., L]\n labels.bboxes_3d_num_points of shape [..., L].\n\n Modifies the bounding box data to turn off ground truth objects that don't\n meet the params.min_num_points point filter:\n\n labels.labels: Boxes with less than params.min_num_points have their label\n set to params.background_id (defaults to 0).\n\n labels.bboxes_3d_mask: Boxes with less than params.min_num_points are set\n to 0.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'min_num_points', 1, 'The minimum number of points allowed before '\n 'the associated ground truth box is turned off. Defaults to 1.')\n p.Define(\n 'background_id', 0, 'The ID of the background class we set '\n 'filtered boxes to. 
Defaults to 0.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n bbox_is_valid = tf.greater_equal(features.labels.bboxes_3d_num_points,\n p.min_num_points)\n features.labels.labels = tf.where(\n bbox_is_valid, features.labels.labels,\n p.background_id * tf.ones_like(features.labels.labels))\n features.labels.bboxes_3d_mask *= tf.cast(bbox_is_valid, tf.float32)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass FilterGroundTruthByDifficulty(Preprocessor):\n \"\"\"Removes groundtruth boxes based on detection difficulty.\n\n This preprocessor expects features to contain the following keys::\n labels.single_frame_detection_difficulties of shape [..., L]\n labels.labels of shape [..., L]\n labels.bboxes_3d_mask of shape [..., L]\n labels.unfiltered_bboxes_3d_mask of shape [..., L]\n\n The preprocessor masks out the bboxes_3d_mask / labels based on whether\n single_frame_detection_difficulties is greater than p.difficulty_threshold.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'background_id', 0, 'The ID of the background class we set '\n 'filtered boxes to. Defaults to 0.')\n p.Define(\n 'difficulty_threshold', 1,\n 'Filter groundtruth bounding boxes whose detection difficulty is '\n 'greater than `difficulty_threshold`')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n bbox_is_valid = tf.less_equal(\n features.labels.single_frame_detection_difficulties,\n p.difficulty_threshold)\n features.labels.labels = tf.where(\n bbox_is_valid, features.labels.labels,\n p.background_id * tf.ones_like(features.labels.labels))\n features.labels.bboxes_3d_mask *= tf.cast(bbox_is_valid, tf.float32)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass CountNumberOfPointsInBoxes3D(Preprocessor):\n \"\"\"Computes bboxes_3d_num_points.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - labels.bboxes_3d of shape [L, 7]\n - labels.bboxes_3d_mask of shape [L]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n Adds the following features:\n labels.bboxes_3d_num_points: [L] - integer tensor containing the number of\n laser points for each corresponding bbox.\n \"\"\"\n\n def TransformFeatures(self, features):\n points_xyz = features.lasers.points_xyz\n if 'points_padding' in features.lasers:\n points_mask = 1 - features.lasers.points_padding\n points_xyz = tf.boolean_mask(points_xyz, points_mask)\n\n points_in_bboxes_mask = geometry.IsWithinBBox3D(points_xyz,\n features.labels.bboxes_3d)\n bboxes_3d_num_points = tf.reduce_sum(\n tf.cast(points_in_bboxes_mask, tf.int32), axis=0, keepdims=False)\n bboxes_3d_num_points *= tf.cast(features.labels.bboxes_3d_mask, tf.int32)\n\n features.labels.bboxes_3d_num_points = bboxes_3d_num_points\n return features\n\n def TransformShapes(self, shapes):\n num_bboxes = shapes.labels.bboxes_3d[0]\n shapes.labels.bboxes_3d_num_points = tf.TensorShape([num_bboxes])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.labels.bboxes_3d_num_points = tf.int32\n return dtypes\n\n\nclass AddPerPointLabels(Preprocessor):\n \"\"\"Computes the class and bbox id of each point.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape 
[P, 3]\n - labels.bboxes_3d of shape [L, 7]\n - labels.labels of shape [L]\n\n This makes an assumption that each point is only in 1 box, which should\n almost always true in 3D. In cases where this is not true, the largest\n label integer and largest bbox_id will be assigned.\n\n NOTE: Be very careful that this is performed after any modifications\n to the semantic labels of each point in the pointcloud. Examples of this\n would be operators like GroundTruthAugmentation, or DropBoxesOutOfRange.\n\n Adds the following features:\n lasers.points_label: [P] - integer tensor containing the class id of each\n point.\n lasers.points_bbox_id: [P] - integer tensor containing box id of each\n point from 0 to num_bboxes, where an id of num_bboxes indicates a\n background point.\n lasers.points_bbox_3d: [P, 7] - float tensor containing bounding box of\n each point.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'per_dimension_adjustment', None,\n 'A list of len 3 of floats with the amount (in meters) to add to '\n 'each dimension of the box before using it to select points. '\n 'If enabled, this is designed to protect against overly tight box '\n 'annotations that appear in KITTI.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n points_xyz = features.lasers.points_xyz\n bboxes_3d = features.labels.bboxes_3d\n num_points, _ = py_utils.GetShape(points_xyz)\n num_bboxes, _ = py_utils.GetShape(bboxes_3d)\n\n if p.per_dimension_adjustment:\n if len(p.per_dimension_adjustment) != 3:\n raise ValueError(\n 'param `per_dimension_adjustment` expected to be len 3.')\n dims_adjustment = tf.constant([0, 0, 0] + p.per_dimension_adjustment +\n [0])\n bboxes_3d = bboxes_3d + dims_adjustment\n\n # Find which points are in each box and what class each box is.\n points_in_bboxes_mask = geometry.IsWithinBBox3D(points_xyz, bboxes_3d)\n points_in_bboxes_mask = tf.cast(points_in_bboxes_mask, tf.int32)\n points_in_bboxes_mask = py_utils.HasShape(points_in_bboxes_mask,\n [num_points, num_bboxes])\n\n # points_in_bboxes_mask is a [num_points, num_bboxes] 0/1 tensor\n # indicating whether that point is in a given box.\n # Each point should only be in one box, so after broadcasting the label\n # across the binary mask, we do a reduce_max to get the max label id\n # for each point. 
Since each point only belongs to one box, it will be\n # the only non-zero (background) label in that box.\n # Note: We assume background to be class_id == 0\n points_label = tf.reduce_max(\n points_in_bboxes_mask * features.labels.labels, axis=1)\n points_bbox_id = tf.argmax(\n points_in_bboxes_mask, axis=1, output_type=tf.int32)\n # If the class is background, make its id == num_bboxes\n points_bbox_id = tf.where(points_label > 0, points_bbox_id,\n tf.broadcast_to(num_bboxes, [num_points]))\n\n # For each point, get the bbox_3d data.\n dummy_bbox = tf.constant([[0, 0, 0, 0, 0, 0, 0]], dtype=tf.float32)\n bboxes_3d = tf.concat([bboxes_3d, dummy_bbox], axis=0)\n points_bbox_3d = tf.gather(bboxes_3d, points_bbox_id)\n\n points_label = tf.reshape(points_label, [num_points])\n points_bbox_id = tf.reshape(points_bbox_id, [num_points])\n features.lasers.points_label = points_label\n features.lasers.points_bbox_id = points_bbox_id\n features.lasers.points_bbox_3d = points_bbox_3d\n return features\n\n def TransformShapes(self, shapes):\n num_points = shapes.lasers.points_xyz[0]\n shapes.lasers.points_label = tf.TensorShape([num_points])\n shapes.lasers.points_bbox_id = tf.TensorShape([num_points])\n shapes.lasers.points_bbox_3d = tf.TensorShape([num_points, 7])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.lasers.points_label = tf.int32\n dtypes.lasers.points_bbox_id = tf.int32\n dtypes.lasers.points_bbox_3d = tf.float32\n return dtypes\n\n\nclass PointsToGrid(Preprocessor):\n \"\"\"Bins points to a 3D-grid using custom op: ops.point_to_grid.\n\n Expects features to have keys:\n - lasers.points_xyz of shape [P, 3]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n If normalizing the labels is enabled, then also expects:\n - labels.weights\n - labels.bboxes_td\n - labels.bboxes_td_mask\n - labels.bboxes_3d_mask\n\n Let:\n gx, gy, gz = p.grid_size\n F = 3 + num_laser_features\n\n Adds the following features:\n grid_centers: [gx, gy, gz, 3]: For each grid cell, the (x,y,z)\n floating point coordinate of its center.\n grid_num_points: [gx, gy, gz]: The number of points in each grid\n cell (integer).\n laser_grid: [gx, gy, gz, num_points_per_cell, F] - A 5D floating\n point Tensor containing the laser data placed into a fixed grid.\n\n Modifies the bboxes in labels to also be within the grid range x/y by default.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_points_per_cell', 100,\n 'The maximum number of points per cell.')\n p.Define('grid_size', (40, 40, 1), 'Grid size along x,y,z axis.')\n\n # The max range of x and y is [-80, 80].\n p.Define('grid_range_x', (-80, 80), 'The X-axis Range covered by the grid')\n p.Define('grid_range_y', (-80, 80), 'The Y-axis Range covered by the grid')\n p.Define('grid_range_z', (-2, 4), 'The Z-axis Range covered by the grid')\n\n p.Define('normalize_td_labels', True,\n 'Whether to clip the labels to the grid limits.')\n return p\n\n def _NormalizeLabels(self, ymin, xmin, ymax, xmax, x_range, y_range):\n \"\"\"Normalizes the bboxes within a given range.\"\"\"\n assert x_range, 'Must specify x_range if clipping.'\n assert y_range, 'Must specify y_range if clipping.'\n assert len(x_range) == 2, 'x_range %s must be 2 elements.' % x_range\n assert len(y_range) == 2, 'y_range %s must be 2 elements.' 
% y_range\n\n x_range_min = x_range[0]\n x_range_len = x_range[1] - x_range[0]\n y_range_min = y_range[0]\n y_range_len = y_range[1] - y_range[0]\n\n xmin = tf.cast(xmin - x_range_min, tf.float32) / tf.cast(\n x_range_len, tf.float32)\n xmax = tf.cast(xmax - x_range_min, tf.float32) / tf.cast(\n x_range_len, tf.float32)\n ymin = tf.cast(ymin - y_range_min, tf.float32) / tf.cast(\n y_range_len, tf.float32)\n ymax = tf.cast(ymax - y_range_min, tf.float32) / tf.cast(\n y_range_len, tf.float32)\n\n return ymin, xmin, ymax, xmax\n\n def TransformFeatures(self, features):\n p = self.params\n\n points_xyz = features.lasers.points_xyz\n points_feature = features.lasers.points_feature\n if ('points_padding' in features.lasers and\n features.lasers.points_padding is not None):\n points_mask = 1 - features.lasers.points_padding\n points_xyz = tf.boolean_mask(points_xyz, points_mask)\n points_feature = tf.boolean_mask(points_feature, points_mask)\n\n points_full = tf.concat([points_xyz, points_feature], axis=-1)\n points_grid_full, grid_centers, num_points = ops.point_to_grid(\n points_full, p.num_points_per_cell, p.grid_size[0], p.grid_size[1],\n p.grid_size[2], p.grid_range_x, p.grid_range_y, p.grid_range_z)\n\n features.laser_grid = points_grid_full\n features.grid_centers = grid_centers\n features.grid_num_points = num_points\n\n if p.normalize_td_labels:\n # Normalize bboxes_td w.r.t grid range.\n obb = features.labels\n x_range = p.grid_range_x\n y_range = p.grid_range_y\n ymin, xmin, ymax, xmax = tf.unstack(obb.bboxes_td[..., :4], axis=-1)\n ymin, xmin, ymax, xmax = self._NormalizeLabels(\n ymin, xmin, ymax, xmax, x_range=x_range, y_range=y_range)\n obb.bboxes_td = tf.concat(\n [tf.stack([ymin, xmin, ymax, xmax], axis=-1), obb.bboxes_td[..., 4:]],\n axis=-1)\n\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n shapes.grid_centers = tf.TensorShape(list(p.grid_size) + [3])\n shapes.grid_num_points = tf.TensorShape(list(p.grid_size))\n shapes.laser_grid = tf.TensorShape(\n list(p.grid_size) +\n [p.num_points_per_cell, 3 + shapes.lasers.points_feature[-1]])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.grid_centers = tf.float32\n dtypes.grid_num_points = tf.int32\n dtypes.laser_grid = tf.float32\n return dtypes\n\n\nclass _PointPillarGridSettings:\n \"\"\"Settings for PointPillars model defined in paper.\n\n https://arxiv.org/abs/1812.05784\n \"\"\"\n # Chooses grid sizes that are a multiple of 16 to support point pillars\n # model requirements. 
These also happen to match the values\n # in the PointPillars paper (voxel width of 0.16m in x, y)\n GRID_X = 432\n GRID_Y = 496\n GRID_Z = 1\n\n # These fields are set in the subclasses.\n GRID_X_RANGE = None\n GRID_Y_RANGE = None\n GRID_Z_RANGE = None\n\n @classmethod\n def UpdateGridParams(cls, grid_params):\n \"\"\"Apply PointPillars settings to grid_params.\"\"\"\n grid_params.grid_size = (cls.GRID_X, cls.GRID_Y, cls.GRID_Z)\n grid_params.grid_range_x = cls.GRID_X_RANGE\n grid_params.grid_range_y = cls.GRID_Y_RANGE\n grid_params.grid_range_z = cls.GRID_Z_RANGE\n\n @classmethod\n def UpdateAnchorGridParams(cls, anchor_params, output_stride=2):\n \"\"\"Apply PointPillars settings to anchor_params.\"\"\"\n # Set anchor settings to match grid settings.\n # Grid size for anchors is half the resolution.\n anchor_params.grid_size = (cls.GRID_X // output_stride,\n cls.GRID_Y // output_stride, cls.GRID_Z)\n anchor_params.grid_range_x = cls.GRID_X_RANGE\n anchor_params.grid_range_y = cls.GRID_Y_RANGE\n # Grid along z axis should be pinned to 0.\n anchor_params.grid_range_z = (0, 0)\n\n\ndef MakeGridSettings(grid_x_range, grid_y_range, grid_z_range, grid_x, grid_y,\n grid_z):\n \"\"\"Returns configured class for PointPillar grid settings.\"\"\"\n\n class GridSettings(_PointPillarGridSettings):\n GRID_X_RANGE = grid_x_range\n GRID_Y_RANGE = grid_y_range\n GRID_Z_RANGE = grid_z_range\n GRID_X = grid_x\n GRID_Y = grid_y\n GRID_Z = grid_z\n\n return GridSettings\n\n\nPointPillarGridCarSettings = MakeGridSettings(\n grid_x_range=(0, 69.12),\n grid_y_range=(-39.68, 39.68),\n grid_z_range=(-3, 1),\n grid_x=432,\n grid_y=496,\n grid_z=1)\n\nPointPillarGridPedCycSettings = MakeGridSettings(\n grid_x_range=(0, 47.36),\n grid_y_range=(-19.84, 19.84),\n grid_z_range=(-2.5, 0.5),\n grid_x=432,\n grid_y=496,\n grid_z=1)\n\n\nclass GridToPillars(Preprocessor):\n \"\"\"Create pillars from a grid of points.\n\n Expects features to have keys:\n grid_centers: [gx, gy, gz, 3]\n\n grid_num_points: [gx, gy, gz]\n\n laser_grid: [gx, gy, gz, num_points_per_cell, F]\n\n Adds the following features:\n point_count: [num_pillars]. The number of points in the pillar.\n\n point_locations: [num_pillars, 3]. The grid location of each pillar.\n\n pillar_points: [num_pillars, num_points_per_cell, F]. Points of each\n pillar.\n\n Drops the following features by default:\n laser_grid\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_points_per_cell', 100,\n 'The maximum number of points per cell.')\n p.Define('num_pillars', 12000, 'The maximum number of pillars to produce.')\n p.Define('drop_laser_grid', True, 'Whether to drop the laser_grid feature.')\n # The density based sampler is more expensive.\n p.Define('use_density_sampler', False,\n 'Use a density based sampler during pillar selection.')\n return p\n\n def _GumbelTransform(self, probs):\n \"\"\"Adds gumbel noise to log probabilities for multinomial sampling.\n\n This enables fast sampling from a multinomial distribution without\n replacement. See https://arxiv.org/abs/1611.01144 for details.\n A colab that demonstrates this in practice is here:\n http://colab/drive/1iuMt2n_r7dKPQG9T0UVMuK3fkbBayKjd\n\n Args:\n probs: A 1-D float tensor containing probabilities, summing to 1.\n\n Returns:\n A 1-D float tensor of the same size of probs, with gumbel noise added to\n log probabilities. 
Taking the top k elements from this provides a\n multinomial sample without replacement.\n \"\"\"\n p = self.params\n log_prob = tf.math.log(probs)\n probs_shape = tf.shape(probs)\n uniform_samples = tf.random.uniform(\n shape=probs_shape,\n dtype=probs.dtype,\n seed=p.random_seed,\n name='uniform_samples')\n gumbel_noise = -tf.math.log(-tf.math.log(uniform_samples))\n return gumbel_noise + log_prob\n\n def _DensitySample(self, num_points):\n p = self.params\n\n # Flatten to [nx * ny * nz] for convenience during sampling.\n num_grid_points = np.prod(p.grid_size)\n flattened_num_points = tf.reshape(num_points, [num_grid_points])\n\n # Normalize flattened_num_points to sum to 1.\n flattened_num_points = tf.cast(flattened_num_points, tf.float32)\n flattened_num_points /= tf.reduce_sum(flattened_num_points)\n\n # TODO(jngiam): Consider generalizing this to enable other methods of\n # sampling: e.g., use largest deviation in z-axis. The gumbel transform\n # can still be applied regardless.\n\n # Add gumbel noise for multinomial sampling.\n sampling_logits = self._GumbelTransform(flattened_num_points)\n _, locations = tf.nn.top_k(\n sampling_logits, k=min(p.num_pillars, num_grid_points))\n\n # Unravel coordinates back to grid locations.\n locations = tf.unravel_index(locations, p.grid_size)\n\n # Unravel index will return a 3 x num_locations tensor, this needs to be\n # transposed so that we have it as num_locations x 3.\n locations = py_utils.HasShape(locations, [3, -1])\n locations = tf.transpose(locations)\n\n return locations\n\n def TransformFeatures(self, features):\n p = self.params\n\n num_points = features.grid_num_points\n if p.use_density_sampler:\n locations = self._DensitySample(num_points)\n else:\n # Select non-empty cells uniformly at random.\n locations = tf.random.shuffle(tf.cast(tf.where(num_points > 0), tf.int32))\n\n num_features = py_utils.GetShape(features.laser_grid)[-1]\n\n # [nx, ny, nz, np, 4] (x, y, z, f)\n points = features.laser_grid\n # [K, np, 4] (x, y, z, f)\n points = tf.gather_nd(points, locations)\n # [nx, ny, nz, 1, 3] (cx, cy, cz)\n centers = features.grid_centers[..., tf.newaxis, :]\n # [K, 1, 3] (cx, cy, cz)\n centers = tf.gather_nd(centers, locations)\n # NOTE: If there are fewer pillars than p.num_pillars, the following\n # padding creates many 'fake' pillars at grid cell (0, 0, 0) with\n # an all-zero pillar. 
Hopefully, the model can learn to ignore these.\n #\n # pillar_points[i, :, :] is the pillar located at pillar_locations[i, :3],\n # and pillar_points[i, :, :] == points_grid_full[pillar_locations[i, :3]].\n # for 0 <= i < pillar_count;\n # pillar_locations[i, :3] are zero-ed, for i >= pillar_count.\n features.pillar_count = tf.shape(locations)[0]\n features.pillar_locations = py_utils.PadOrTrimTo(locations,\n [p.num_pillars, 3])\n features.pillar_points = py_utils.PadOrTrimTo(\n points, [p.num_pillars, p.num_points_per_cell, num_features])\n features.pillar_centers = py_utils.PadOrTrimTo(centers,\n [p.num_pillars, 1, 3])\n\n if p.drop_laser_grid:\n del features['laser_grid']\n\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n num_features = shapes.laser_grid[-1]\n shapes.pillar_count = tf.TensorShape([])\n shapes.pillar_locations = tf.TensorShape([p.num_pillars, 3])\n shapes.pillar_points = tf.TensorShape(\n [p.num_pillars, p.num_points_per_cell, num_features])\n shapes.pillar_centers = tf.TensorShape([p.num_pillars, 1, 3])\n if p.drop_laser_grid:\n del shapes['laser_grid']\n return shapes\n\n def TransformDTypes(self, dtypes):\n p = self.params\n dtypes.pillar_count = tf.int32\n dtypes.pillar_locations = tf.int32\n dtypes.pillar_points = tf.float32\n dtypes.pillar_centers = tf.float32\n if p.drop_laser_grid:\n del dtypes['laser_grid']\n return dtypes\n\n\nclass GridAnchorCenters(Preprocessor):\n \"\"\"Create anchor centers on a grid.\n\n Anchors are placed in the middle of each grid cell. For example, on a 2D grid\n range (0 -> 10, 0 -> 10) with a 10 x 5 grid size, the anchors will be placed\n at [(0.5, 1), (0.5, 3), ... , (9.5, 7), (9.5, 9)].\n\n Adds the following features:\n anchor_centers: [num_locations, 3] - Floating point output containing the\n center (x, y, z) locations for tiling anchor boxes.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'grid_size', (20, 20, 1), 'Grid size along x,y,z axis. This will '\n 'be used to generate the anchor center locations. Note that this '\n 'would likely be different from the grid_* parameters in '\n 'LaserGridExtractor: the grid extractor may choose to extract '\n 'points more densely. 
Instead, this should correspond to the '\n 'model\\'s prediction layer: the predicted anchor box residuals '\n 'should match this grid.')\n p.Define('grid_range_x', (-25, 25), 'The x-axis range covered by the grid.')\n p.Define('grid_range_y', (-25, 25), 'The y-axis range covered by the grid.')\n p.Define('grid_range_z', (0, 0), 'The z-axis range covered by the grid.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n utils_3d = detection_3d_lib.Utils3D()\n\n # Compute the grid cell size and adjust the range sent to dense coordinates\n # by half a cell size so as to ensure that the anchors are placed in the\n # center of each grid cell.\n grid_size_x, grid_size_y, grid_size_z = p.grid_size\n grid_cell_sizes = [\n float(p.grid_range_x[1] - p.grid_range_x[0]) / grid_size_x,\n float(p.grid_range_y[1] - p.grid_range_y[0]) / grid_size_y,\n float(p.grid_range_z[1] - p.grid_range_z[0]) / grid_size_z,\n ]\n half_size_x, half_size_y, half_size_z = np.asarray(grid_cell_sizes) / 2.0\n\n grid_shape = list(p.grid_size) + [3]\n anchor_centers = utils_3d.CreateDenseCoordinates([\n [\n p.grid_range_x[0] + half_size_x,\n p.grid_range_x[1] - half_size_x,\n grid_size_x\n ],\n [\n p.grid_range_y[0] + half_size_y,\n p.grid_range_y[1] - half_size_y,\n grid_size_y\n ],\n [\n p.grid_range_z[0] + half_size_z,\n p.grid_range_z[1] - half_size_z,\n grid_size_z\n ],\n ]) # pyformat: disable\n features.anchor_centers = tf.reshape(anchor_centers, grid_shape)\n\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n shapes.anchor_centers = tf.TensorShape(list(p.grid_size) + [3])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.anchor_centers = tf.float32\n return dtypes\n\n\nclass SparseCenterSelector(Preprocessor):\n \"\"\"Select centers for anchors and cells.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n If lasers.num_seeded_points of shape [] is provided, it indicates that the\n first num_seeded_points of lasers.points_xyz should be used as seeds for\n farthest point sampling (e.g., always chosen). Currently the concept\n of seeding is not implemented for anything but farthest point sampling.\n\n Adds the following features:\n anchor_centers: [num_cell_centers, 3] - Floating point output containing the\n center (x, y, z) locations for tiling anchor boxes.\n cell_center_xyz: [num_cell_centers, 3] - Floating point output containing\n the center (x, y, z) locations for each cell to featurize.\n \"\"\"\n\n _SAMPLING_METHODS = ['farthest_point', 'random_uniform']\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_cell_centers', 256, 'Number of centers.')\n p.Define(\n 'features_preparation_layers', [],\n 'A list of Params for layers to run on the features before '\n 'performing farthest point sampling. For example, one may wish to '\n 'drop points out of frustum for KITTI before selecting centers. '\n 'Note that these layers will not mutate the original features, '\n 'instead, a copy will be made.')\n p.Define(\n 'sampling_method', 'farthest_point',\n 'Which sampling method to use. 
One of {}'.format(cls._SAMPLING_METHODS))\n p.Define(\n 'fix_z_to_zero', True, 'Whether to fix z to 0 when retrieving the '\n 'center xyz coordinates.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n\n if p.sampling_method not in self._SAMPLING_METHODS:\n raise ValueError('Param `sampling_method` must be one of {}.'.format(\n self._SAMPLING_METHODS))\n if p.features_preparation_layers is not None:\n self.CreateChildren('features_preparation_layers',\n p.features_preparation_layers)\n\n def _FarthestPointSampleCenters(self, points_xyz, num_seeded_points):\n \"\"\"Samples centers with Farthest Point Sampling.\n\n Args:\n points_xyz: An unpadded tf.float32 Tensor of shape [P, 3] with per point\n (x, y, z) locations. We expect any padded points to be removed before\n this function is called.\n num_seeded_points: integer indicating how many of the first\n num_seeded_points points in points_xyz should be considered\n as seeds for FPS (always chosen).\n\n Returns:\n A tf.float32 Tensor of shape [p.num_cell_centers, 3] with selected centers\n to use as anchors.\n \"\"\"\n p = self.params\n num_points = tf.shape(points_xyz)[0]\n points_padding = tf.zeros((num_points,), dtype=tf.float32)\n padded_num_points = tf.maximum(num_points, p.num_cell_centers)\n\n # Pad both the points and padding if for some reason the input pointcloud\n # has less points than p.num_cell_centers.\n points_xy = py_utils.PadOrTrimTo(points_xyz[:, :2], [padded_num_points, 2])\n points_padding = py_utils.PadOrTrimTo(\n points_padding, [padded_num_points], pad_val=1.0)\n\n sampled_idx, _ = car_lib.FarthestPointSampler(\n points_xy[tf.newaxis, ...],\n points_padding[tf.newaxis, ...],\n p.num_cell_centers,\n num_seeded_points=num_seeded_points,\n random_seed=p.random_seed)\n sampled_idx = sampled_idx[0, :]\n\n # Gather centers.\n if p.fix_z_to_zero:\n centers = tf.concat([\n tf.gather(points_xy, sampled_idx),\n tf.zeros((p.num_cell_centers, 1)),\n ], axis=-1) # pyformat: disable\n else:\n centers = tf.gather(points_xyz, sampled_idx)\n\n return centers\n\n def _RandomUniformSampleCenters(self, points_xyz):\n \"\"\"Samples centers with Random Uniform Sampling.\n\n Args:\n points_xyz: An unpadded tf.float32 Tensor of shape [P, 3] with per point\n (x, y, z) locations. 
We expect any padded points to be removed before\n this function is called.\n\n Returns:\n A tf.float32 Tensor of shape [p.num_cell_centers, 3] with selected centers\n to use as anchors.\n \"\"\"\n p = self.params\n # We want the center Z value to be 0 so just exclude it\n centers_xy = tf.random.shuffle(points_xyz[:, :2], seed=p.random_seed)\n selected_centers_xy = py_utils.PadOrTrimTo(centers_xy,\n [p.num_cell_centers, 2])\n return tf.concat([selected_centers_xy,\n tf.zeros((p.num_cell_centers, 1))],\n axis=-1)\n\n def _SampleCenters(self, points_xyz, num_seeded_points):\n p = self.params\n if p.sampling_method == 'farthest_point':\n return self._FarthestPointSampleCenters(points_xyz, num_seeded_points)\n elif p.sampling_method == 'random_uniform':\n if num_seeded_points > 0:\n raise NotImplementedError(\n 'Random sampling with seeded points not yet implemented.')\n return self._RandomUniformSampleCenters(points_xyz)\n else:\n raise ValueError('Param `sampling_method` must be one of {}.'.format(\n self._SAMPLING_METHODS))\n\n def TransformFeatures(self, features):\n p = self.params\n\n prepared_features = features.DeepCopy()\n for prep_layer in self.features_preparation_layers:\n prepared_features = prep_layer.FPropDefaultTheta(prepared_features)\n\n num_seeded_points = prepared_features.lasers.get('num_seeded_points', 0)\n points_data = prepared_features.lasers\n\n points_xyz = points_data.points_xyz\n if 'points_padding' in points_data:\n points_padding = points_data.points_padding\n points_mask = 1 - points_padding\n points_xyz = tf.boolean_mask(points_xyz, points_mask)\n\n centers = self._SampleCenters(points_xyz, num_seeded_points)\n centers = py_utils.HasShape(centers, [p.num_cell_centers, 3])\n\n features.anchor_centers = centers\n features.cell_center_xyz = centers\n\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n shapes.anchor_centers = tf.TensorShape([p.num_cell_centers, 3])\n shapes.cell_center_xyz = tf.TensorShape([p.num_cell_centers, 3])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.anchor_centers = tf.float32\n dtypes.cell_center_xyz = tf.float32\n return dtypes\n\n\nclass SparseCellGatherFeatures(Preprocessor):\n \"\"\"Select local features for each cell.\n\n This preprocessor expects features to contain:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, F]\n - cell_center_xyz of shape [C, 3]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n Adds the following features:\n cell_points_xyz: [num_centers, num_points_per_cell, 3] - Floating point\n output containing the (x, y, z) locations for each point for a given\n center.\n cell_feature: [num_centers, num_points_per_cell, F] - Floating point output\n containing the features for each point for a given center.\n cell_points_padding: [num_centers, num_points_per_cell] - 0/1 padding\n for the points in each cell.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_points_per_cell', 128, 'The number of points per cell.')\n p.Define('max_distance', 3.0, 'Max distance of point to cell center.')\n p.Define(\n 'sample_neighbors_uniformly', False,\n 'Whether to sample the neighbor points for every cell center '\n 'uniformly at random. 
If False, this will default to selecting by '\n 'distance.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n\n num_centers = py_utils.GetShape(features.cell_center_xyz, 1)[0]\n num_features = py_utils.GetShape(features.lasers.points_feature)[-1]\n\n points_xyz = features.lasers.points_xyz\n points_feature = features.lasers.points_feature\n if 'points_padding' in features.lasers:\n points_mask = 1 - features.lasers.points_padding\n points_xyz = tf.boolean_mask(points_xyz, points_mask)\n points_feature = tf.boolean_mask(points_feature, points_mask)\n\n # Note: points_xyz and points_feature must be unpadded as we pass\n # padding=None to neighborhood indices. Ensuring that it is unpadded\n # helps improve performance.\n\n # Get nearby points using kNN.\n sample_indices, sample_indices_padding = car_lib.NeighborhoodIndices(\n tf.expand_dims(points_xyz, 0),\n tf.expand_dims(features.cell_center_xyz, 0),\n p.num_points_per_cell,\n points_padding=None,\n max_distance=p.max_distance,\n sample_neighbors_uniformly=p.sample_neighbors_uniformly)\n\n # Take first example since NeighboorhoodIndices expects batch dimension.\n sample_indices = sample_indices[0, :, :]\n sample_indices_padding = sample_indices_padding[0, :, :]\n\n sample_indices = py_utils.HasShape(sample_indices,\n [num_centers, p.num_points_per_cell])\n\n cell_points_xyz = tf.gather(points_xyz, sample_indices)\n cell_points_xyz = py_utils.HasShape(cell_points_xyz,\n [num_centers, p.num_points_per_cell, 3])\n\n cell_feature = tf.gather(points_feature, sample_indices)\n cell_feature = py_utils.HasShape(\n cell_feature, [num_centers, p.num_points_per_cell, num_features])\n\n cell_points_padding = py_utils.HasShape(\n sample_indices_padding, [num_centers, p.num_points_per_cell])\n\n features.update({\n 'cell_points_xyz': cell_points_xyz,\n 'cell_feature': cell_feature,\n 'cell_points_padding': cell_points_padding,\n })\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n num_centers = shapes.cell_center_xyz[0]\n base_shape = [num_centers, p.num_points_per_cell]\n num_features = shapes.lasers.points_feature[-1]\n shapes.cell_points_xyz = tf.TensorShape(base_shape + [3])\n shapes.cell_feature = tf.TensorShape(base_shape + [num_features])\n shapes.cell_points_padding = tf.TensorShape(base_shape)\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.cell_points_xyz = tf.float32\n dtypes.cell_feature = tf.float32\n dtypes.cell_points_padding = tf.float32\n return dtypes\n\n\nclass SparseCellCentersTopK(Preprocessor):\n \"\"\"Given selected centers and gathered points/features, apply a filter.\n\n This preprocessor expects features to contain `cell_center_xyz` and all\n entries in params.features_to_modify, and that the leading dimension should\n all be the same (num_cell_centers from SparseCenterSelector).\n\n We then modify all values in features that are specified in\n params.features_to_modify by sorting them with the specified sort function\n (specified by params.sort_by) operating on features.cell_center_xyz, and then\n taking the top K (specified by params.num_cell_centers) along the first\n dimension.\n \"\"\"\n\n _REGISTERED_SORT_FUNCTIONS = ['distance']\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('num_cell_centers', 512, 'The number of centers after filtering.')\n p.Define(\n 'sort_by', 'distance', 'A string specifying which sort function '\n 'to use. 
Currently we just support `distance`.')\n p.Define('features_to_modify', [\n 'cell_center_xyz', 'anchor_centers', 'cell_points_xyz', 'cell_feature',\n 'cell_points_padding'\n ], 'A list of keys from the features dict to modify.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if p.sort_by not in self._REGISTERED_SORT_FUNCTIONS:\n raise ValueError('{} not supported. We only support {}.'.format(\n p.sort_by, self._REGISTERED_SORT_FUNCTIONS))\n if len(p.features_to_modify) < 1:\n raise ValueError('Need to modify at least one feature.')\n\n def _SortByDistance(self, features):\n dist = tf.linalg.norm(features.cell_center_xyz, axis=-1)\n return tf.argsort(dist, axis=-1, direction='ASCENDING')\n\n def _Sort(self, features):\n p = self.params\n if p.sort_by == 'distance':\n return self._SortByDistance(features)\n else:\n raise ValueError('Unsupported sort function: {}.'.format(p.sort_by))\n\n def TransformFeatures(self, features):\n p = self.params\n sort_indices = self._Sort(features)\n sort_indices_top_k = sort_indices[:p.num_cell_centers, ...]\n\n # Gather each of the relevant items\n for key in p.features_to_modify:\n shape = py_utils.GetShape(features[key])\n output_shape = [p.num_cell_centers] + shape[1:]\n features[key] = py_utils.PadOrTrimTo(\n tf.gather(features[key], sort_indices_top_k), output_shape)\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n for key in p.features_to_modify:\n shapes[key] = tf.TensorShape([p.num_cell_centers] + shapes[key][1:])\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass TileAnchorBBoxes(Preprocessor):\n \"\"\"Creates anchor_bboxes given anchor_centers.\n\n This preprocessor expects features to contain the following keys:\n - anchor_centers of shape [...base shape..., 3]\n\n Adds the following features:\n anchor_bboxes: base_shape + [7] - Floating point anchor box\n output containing the anchor boxes and the 7 floating point\n values for each box that define the box (x, y, z, dx, dy, dz, phi).\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('anchor_box_dimensions', [],\n 'List of anchor box sizes per center.')\n p.Define('anchor_box_offsets', [], 'List of anchor box offsets per center.')\n p.Define('anchor_box_rotations', [],\n 'List of anchor box rotations per center.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n utils_3d = detection_3d_lib.Utils3D()\n\n assert p.anchor_box_dimensions\n assert p.anchor_box_offsets\n assert p.anchor_box_rotations\n\n base_shape = py_utils.GetShape(features.anchor_centers)[:-1]\n num_box_per_center = len(p.anchor_box_dimensions)\n\n anchor_centers = tf.reshape(features.anchor_centers, [-1, 3])\n anchor_bboxes = utils_3d.MakeAnchorBoxes(\n anchor_centers, tf.identity(p.anchor_box_dimensions),\n tf.identity(p.anchor_box_offsets), tf.identity(p.anchor_box_rotations))\n features.anchor_bboxes = tf.reshape(anchor_bboxes,\n base_shape + [num_box_per_center, 7])\n\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n base_shape = shapes.anchor_centers[:-1]\n num_box_per_center = len(p.anchor_box_dimensions)\n shapes.anchor_bboxes = base_shape.concatenate([num_box_per_center, 7])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.anchor_bboxes = tf.float32\n return dtypes\n\n\nclass _AnchorBoxSettings:\n \"\"\"Helper class to parameterize and update anchor box settings.\"\"\"\n # Implementations should fill out the following class 
members.\n DIMENSION_PRIORS = []\n ROTATIONS = []\n CENTER_X_OFFSETS = []\n CENTER_Y_OFFSETS = []\n CENTER_Z_OFFSETS = []\n\n @classmethod\n def NumAnchors(cls):\n return np.prod([\n len(cls.DIMENSION_PRIORS),\n len(cls.ROTATIONS),\n len(cls.CENTER_X_OFFSETS),\n len(cls.CENTER_Y_OFFSETS),\n len(cls.CENTER_Z_OFFSETS)\n ])\n\n @classmethod\n def GenerateAnchorSettings(cls):\n \"\"\"Generate anchor settings.\n\n Returns:\n A `NestedMap` containing three lists of the same length:\n - anchor_box_dimensions\n - anchor_box_rotations\n - anchor_box_offsets\n\n These can be used with the TileAnchorBBoxes preprocessor.\n \"\"\"\n anchor_box_dimensions = []\n anchor_box_rotations = []\n anchor_box_offsets = []\n\n # The following is equivalent to a formulation of itertools.product, but\n # is explicitly listed for readability.\n\n # *Please note*: The ordering is important for ModelV2, which makes\n # assumptions that the offset dimensions come first.\n for cx in cls.CENTER_X_OFFSETS:\n for cy in cls.CENTER_Y_OFFSETS:\n for cz in cls.CENTER_Z_OFFSETS:\n for rot in cls.ROTATIONS:\n for dims in cls.DIMENSION_PRIORS:\n anchor_box_dimensions += [dims]\n anchor_box_rotations += [rot]\n anchor_box_offsets += [(cx, cy, cz)]\n\n # Check one of the lists has entries.\n assert anchor_box_dimensions\n\n return py_utils.NestedMap(\n anchor_box_dimensions=anchor_box_dimensions,\n anchor_box_rotations=anchor_box_rotations,\n anchor_box_offsets=anchor_box_offsets)\n\n @classmethod\n def Update(cls, params):\n \"\"\"Updates anchor box settings from input configuration lists.\n\n Given dimensions priors, rotations, and offsets, computes the cartesian\n product of the settings.\n\n Args:\n params: The KITTIAnchorExtractorBase.Params() object to update.\n\n Returns:\n Params updated with the anchor settings.\n\n In total there are N combinations, where each (anchor_box_dimensions[i],\n anchor_box_rotations[i], anchor_box_offsets[i]) for i in range(N) is an\n option.\n \"\"\"\n p = params\n settings = cls.GenerateAnchorSettings()\n p.anchor_box_dimensions = settings.anchor_box_dimensions\n p.anchor_box_rotations = settings.anchor_box_rotations\n p.anchor_box_offsets = settings.anchor_box_offsets\n return p\n\n\ndef MakeAnchorBoxSettings(dimension_priors, rotations, center_x_offsets,\n center_y_offsets, center_z_offsets):\n \"\"\"Returns a configured class for setting anchor box settings.\"\"\"\n\n class CustomAnchorBoxSettings(_AnchorBoxSettings):\n DIMENSION_PRIORS = dimension_priors\n ROTATIONS = rotations\n CENTER_X_OFFSETS = center_x_offsets\n CENTER_Y_OFFSETS = center_y_offsets\n CENTER_Z_OFFSETS = center_z_offsets\n\n return CustomAnchorBoxSettings\n\n\nclass SparseCarV1AnchorBoxSettings(_AnchorBoxSettings):\n \"\"\"Anchor box settings for training on Cars for Sparse models.\"\"\"\n # Borrowed from PointPillar dimension prior for cars.\n DIMENSION_PRIORS = [(1.6, 3.9, 1.56)]\n\n # 4 Rotations with axis aligned and both diagonals.\n ROTATIONS = [0, np.pi / 2, np.pi / 4, 3 * np.pi / 4]\n\n # 25 offsets per anchor box with fixed z offset at -1.\n CENTER_X_OFFSETS = np.linspace(-1.5, 1.5, 5)\n CENTER_Y_OFFSETS = np.linspace(-1.5, 1.5, 5)\n CENTER_Z_OFFSETS = [-1.]\n\n\nclass PointPillarAnchorBoxSettingsCar(_AnchorBoxSettings):\n DIMENSION_PRIORS = [(1.6, 3.9, 1.56)]\n ROTATIONS = [0, np.pi / 2]\n # Fixed offset for every anchor box, based on a reading of the paper / code\n # 0 offsets for x and y, and -1 for z.\n CENTER_X_OFFSETS = [0.]\n CENTER_Y_OFFSETS = [0.]\n CENTER_Z_OFFSETS = [-1.]\n\n\nclass 
PointPillarAnchorBoxSettingsPed(PointPillarAnchorBoxSettingsCar):\n DIMENSION_PRIORS = [(0.6, 0.8, 1.73)]\n CENTER_Z_OFFSETS = [-0.6]\n\n\nclass PointPillarAnchorBoxSettingsCyc(PointPillarAnchorBoxSettingsCar):\n DIMENSION_PRIORS = [(0.6, 1.76, 1.73)]\n CENTER_Z_OFFSETS = [-0.6]\n\n\nclass PointPillarAnchorBoxSettingsPedCyc(PointPillarAnchorBoxSettingsCar):\n DIMENSION_PRIORS = [(0.6, 0.8, 1.7), (0.6, 1.76, 1.73)]\n CENTER_Z_OFFSETS = [-0.6]\n\n\nclass AnchorAssignment(Preprocessor):\n \"\"\"Perform anchor assignment on the features.\n\n This preprocessor expects features to contain the following keys:\n - anchor_bboxes of shape [...base shape..., 7]\n - labels.bboxes_3d\n - labels.labels\n - labels.bboxes_3d_mask\n\n Adds the following features:\n\n anchor_localization_residuals: base_shape + [7] floating point tensor of\n residuals. The model is expected to regress against these residuals as\n targets. The residuals can be converted back into bboxes using\n detection_3d_lib.Utils3D.ResidualsToBBoxes.\n assigned_gt_idx: base_shape - The corresponding index of the ground\n truth bounding box for each anchor box in anchor_bboxes, anchors not\n assigned will have idx be set to -1.\n assigned_gt_bbox: base_shape + [7] - The corresponding ground\n truth bounding box for each anchor box in anchor_bboxes.\n assigned_gt_labels: base_shape - The assigned groundtruth label\n for each anchor box.\n assigned_gt_similarity_score: base_shape - The similarity score\n for each assigned anchor box.\n assigned_cls_mask: base_shape mask for classification loss per anchor.\n This should be 1.0 if the anchor has a foreground or background\n assignment; otherwise, it will be assigned to 0.0.\n assigned_reg_mask: base_shape mask for regression loss per anchor.\n This should be 1.0 if the anchor has a foreground assignment;\n otherwise, it will be assigned to 0.0.\n Note: background anchors do not have regression targets.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'foreground_assignment_threshold', 0.5,\n 'Score (usually IOU) threshold for assigning a box as foreground.')\n p.Define(\n 'background_assignment_threshold', 0.35,\n 'Score (usually IOU) threshold for assigning a box as background.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n utils_3d = detection_3d_lib.Utils3D()\n\n # anchor_bboxes will be returned with shape [#centers, #boxes_per_center, 7]\n # flatten boxes here for matching.\n base_shape = py_utils.GetShape(features.anchor_bboxes)[:-1]\n anchor_bboxes = tf.reshape(features.anchor_bboxes, [-1, 7])\n\n assigned_anchors = utils_3d.AssignAnchors(\n anchor_bboxes,\n features.labels.bboxes_3d,\n features.labels.labels,\n features.labels.bboxes_3d_mask,\n foreground_assignment_threshold=p.foreground_assignment_threshold,\n background_assignment_threshold=p.background_assignment_threshold)\n\n # Add new features.\n features.assigned_gt_idx = tf.reshape(assigned_anchors.assigned_gt_idx,\n base_shape)\n features.assigned_gt_bbox = tf.reshape(assigned_anchors.assigned_gt_bbox,\n base_shape + [7])\n features.assigned_gt_labels = tf.reshape(\n assigned_anchors.assigned_gt_labels, base_shape)\n features.assigned_gt_similarity_score = tf.reshape(\n assigned_anchors.assigned_gt_similarity_score, base_shape)\n features.assigned_cls_mask = tf.reshape(assigned_anchors.assigned_cls_mask,\n base_shape)\n features.assigned_reg_mask = tf.reshape(assigned_anchors.assigned_reg_mask,\n base_shape)\n\n # Compute residuals.\n 
features.anchor_localization_residuals = utils_3d.LocalizationResiduals(\n features.anchor_bboxes, features.assigned_gt_bbox)\n\n return features\n\n def TransformShapes(self, shapes):\n base_shape = shapes.anchor_bboxes[:-1]\n box_shape = base_shape.concatenate([7])\n\n shapes.anchor_localization_residuals = box_shape\n shapes.assigned_gt_idx = base_shape\n shapes.assigned_gt_bbox = box_shape\n shapes.assigned_gt_labels = base_shape\n shapes.assigned_gt_similarity_score = base_shape\n shapes.assigned_cls_mask = base_shape\n shapes.assigned_reg_mask = base_shape\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.anchor_localization_residuals = tf.float32\n dtypes.assigned_gt_idx = tf.int32\n dtypes.assigned_gt_bbox = tf.float32\n dtypes.assigned_gt_labels = tf.int32\n dtypes.assigned_gt_similarity_score = tf.float32\n dtypes.assigned_cls_mask = tf.float32\n dtypes.assigned_reg_mask = tf.float32\n return dtypes\n\n\nclass DropLaserPointsOutOfRange(Preprocessor):\n \"\"\"Drops laser points that are out of pre-defined x/y/z ranges.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, F]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n Modifies the following features:\n Removes or sets padding to 1 for all points outside a given range. Modifies\n all items in the lasers subdictionary like lasers.points_xyz,\n lasers.points_feature, lasers.points_padding, and optionally\n lasers.points_label, lasers.points_bbox_id.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('keep_x_range', (-np.inf, np.inf),\n 'Only points that have x coordinates within this range are kept.')\n p.Define('keep_y_range', (-np.inf, np.inf),\n 'Only points that have y coordinates within this range are kept.')\n p.Define(\n 'keep_z_range', (-np.inf, np.inf),\n 'Only points that have z coordinates within this range are kept. '\n 'Approximate ground-removal can be performed by specifying a '\n 'lower-bound on the z-range.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n\n points_xyz = features.lasers.points_xyz\n if 'points_padding' in features.lasers:\n points_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)\n else:\n # All points are real, we keep points unpadded by applying boolean_mask\n # on points_mask later.\n points_mask = tf.ones_like(points_xyz[:, 0], dtype=tf.bool)\n\n min_x, max_x = p.keep_x_range\n min_y, max_y = p.keep_y_range\n min_z, max_z = p.keep_z_range\n\n # Short-circuit if all ranges are set to -inf, inf.\n if (np.all(np.isneginf([min_x, min_y, min_z])) and\n np.all(np.isposinf([max_x, max_y, max_z]))):\n return features\n\n if min_x != -np.inf:\n points_mask &= points_xyz[:, 0] >= min_x\n if min_y != -np.inf:\n points_mask &= points_xyz[:, 1] >= min_y\n if min_z != -np.inf:\n points_mask &= points_xyz[:, 2] >= min_z\n\n if max_x != np.inf:\n points_mask &= points_xyz[:, 0] <= max_x\n if max_y != np.inf:\n points_mask &= points_xyz[:, 1] <= max_y\n if max_z != np.inf:\n points_mask &= points_xyz[:, 2] <= max_z\n\n if 'points_padding' in features.lasers:\n # Suffices to just update the padding.\n features.lasers.points_padding = 1. 
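The range filtering above reduces to a chain of per-axis comparisons. A minimal NumPy sketch of the same keep-mask logic (RangeMask is a hypothetical helper, not part of the library), using the lower z-bound as the approximate ground removal mentioned in the keep_z_range docstring:

import numpy as np

def RangeMask(points_xyz, x_range, y_range, z_range):
  """Boolean keep-mask over [P, 3] points; each range is a (min, max) tuple."""
  (min_x, max_x), (min_y, max_y), (min_z, max_z) = x_range, y_range, z_range
  mask = np.ones(points_xyz.shape[0], dtype=bool)
  mask &= (points_xyz[:, 0] >= min_x) & (points_xyz[:, 0] <= max_x)
  mask &= (points_xyz[:, 1] >= min_y) & (points_xyz[:, 1] <= max_y)
  mask &= (points_xyz[:, 2] >= min_z) & (points_xyz[:, 2] <= max_z)
  return mask

pts = np.array([[0., 0., 0.], [10., 0., 0.], [0., 0., -3.]])
mask = RangeMask(pts, (-5., 5.), (-5., 5.), (-1.5, np.inf))
assert mask.tolist() == [True, False, False]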
- tf.cast(points_mask, tf.float32)\n else:\n features.lasers = features.lasers.Transform(\n _GetApplyPointMaskFn(points_mask))\n\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass KITTIDropPointsOutOfFrustum(Preprocessor):\n \"\"\"Drops laser points that are outside of the camera frustum.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, F]\n - images.velo_to_image_plane of shape [3, 4]\n - images.width of shape [1]\n - images.height of shape [1]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n Modifies the following features:\n lasers.points_xyz, lasers.points_feature, lasers.points_padding, and\n optionally lasers.points_label, lasers.points_bbox_id so that\n points outside the frustum have padding set to 1 or are removed.\n \"\"\"\n\n def TransformFeatures(self, features):\n # Drop points behind the car (behind x-axis = 0).\n images = features.images\n front_indices = features.lasers.points_xyz[:, 0] >= 0\n\n if 'points_padding' not in features.lasers:\n # Keep tensors unpadded and small using boolean_mask.\n features.lasers.points_xyz = tf.boolean_mask(features.lasers.points_xyz,\n front_indices)\n features.lasers.points_feature = tf.boolean_mask(\n features.lasers.points_feature, front_indices)\n\n # Drop those points outside the image plane.\n points_image = geometry.PointsToImagePlane(features.lasers.points_xyz,\n images.velo_to_image_plane)\n in_image_plane = (\n (points_image[:, 0] >= 0) &\n (points_image[:, 0] <= tf.cast(images.width, tf.float32)) &\n (points_image[:, 1] >= 0) &\n (points_image[:, 1] <= tf.cast(images.height, tf.float32)))\n\n if 'points_padding' in features.lasers:\n # Update padding to only include front indices and in image plane.\n points_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)\n points_mask &= front_indices\n points_mask &= in_image_plane\n features.lasers.points_padding = 1. - tf.cast(points_mask, tf.float32)\n else:\n features.lasers = features.lasers.Transform(\n _GetApplyPointMaskFn(in_image_plane))\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass RandomWorldRotationAboutZAxis(Preprocessor):\n \"\"\"Rotates the world randomly as a form of data augmentation.\n\n Rotations are performed around the *z-axis*. This assumes that the car is\n always level. In general, we'd like to instead rotate the car on the spot,\n this would then make sense for cases where the car is on a slope.\n\n When there are leading dimensions, this will rotate the boxes with the same\n transformation across all the frames. 
This is useful when the input is a
  sequence of frames from the same run segment.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [..., 3]
  - labels.bboxes_3d of shape [..., 7]

  Modifies the following features:
    lasers.points_xyz, labels.bboxes_3d with the same rotation applied to both.

  Adds the following features:
    world_rot_z which contains the rotation applied to the example.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'max_rotation', None,
        'The rotation amount will be randomly picked from '
        '[-max_rotation, max_rotation).')
    p.Define(
        'include_world_rot_z', True,
        'Whether to include the applied rotation as an additional tensor. '
        'It can be helpful to disable this when using the preprocessor in a '
        'way that expects the structure of the features to be the same '
        '(e.g., as a branch in tf.cond).')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if p.max_rotation is None:
      raise ValueError('max_rotation needs to be specified, instead of None.')

  def TransformFeatures(self, features):
    p = self.params
    rot = tf.random.uniform((),
                            minval=-p.max_rotation,
                            maxval=p.max_rotation,
                            seed=p.random_seed)

    # Rotating about the z-axis is equivalent to applying a yaw.
    pose = [0., 0., 0., rot, 0., 0.]

    # Rotate points.
    features.lasers.points_xyz = geometry.CoordinateTransform(
        features.lasers.points_xyz, pose)

    # Rotate bboxes; note that the heading has a special case.
    bboxes_xyz = features.labels.bboxes_3d[..., :3]
    bboxes_dims = features.labels.bboxes_3d[..., 3:6]
    bboxes_rot = features.labels.bboxes_3d[..., 6:]

    bboxes_xyz = geometry.CoordinateTransform(bboxes_xyz, pose)

    # The heading correction subtracts rot from the bboxes' rotations.
    bboxes_rot = geometry.WrapAngleRad(bboxes_rot - rot)

    features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],
                                          axis=-1)
    if p.include_world_rot_z:
      features.world_rot_z = rot
    return features

  def TransformShapes(self, shapes):
    if self.params.include_world_rot_z:
      shapes.world_rot_z = tf.TensorShape([])
    return shapes

  def TransformDTypes(self, dtypes):
    if self.params.include_world_rot_z:
      dtypes.world_rot_z = tf.float32
    return dtypes


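As a quick sanity check on the heading bookkeeping above (the world rotates by rot while each box heading has rot subtracted and is then re-wrapped), here is a NumPy sketch; the wrap-to-[-pi, pi) convention is an assumption of the sketch and geometry.WrapAngleRad may differ in its boundary handling:

import numpy as np

def WrapAngleRad(angle):
  # Wrap to [-pi, pi); one common convention, assumed for this sketch.
  return (angle + np.pi) % (2. * np.pi) - np.pi

# Subtracting a rotation from a heading near +pi wraps around instead of
# drifting outside the representable range.
heading, rot = 3.0, -1.5
assert np.isclose(WrapAngleRad(heading - rot), 4.5 - 2. * np.pi)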
class DropPointsOutOfFrustum(Preprocessor):
  """Drops points outside of pre-defined theta / phi ranges.

  Note that the ranges for keep_phi_range can be negative; this is because the
  phi values wrap around 2*pi. Thus, a valid range that filters the 90 deg
  frontal field of view of the car can be specified as [-pi/4, pi/4].

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, K]

  Modifies the following features:
  - lasers.points_xyz removing any points out of frustum.
  - lasers.points_feature removing any points out of frustum.

  Note: We expect a downstream processor that filters out boxes with few points
  to drop the corresponding bboxes.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('keep_theta_range', (0., np.pi),
             'Only points that have theta coordinates within this range are '
             'kept.')
    p.Define('keep_phi_range', (0., 2. * np.pi),
             'Only points that have phi coordinates within this range are '
             'kept.')
    return p

  def TransformFeatures(self, features):
    p = self.params

    if 'points_padding' in features.lasers:
      raise ValueError('DropPointsOutOfFrustum preprocessor does not support '
                       'padded lasers.')

    points_xyz = features.lasers.points_xyz
    points_feature = features.lasers.points_feature

    min_theta, max_theta = p.keep_theta_range
    if (min_theta < 0. or min_theta > np.pi or max_theta < 0. or
        max_theta > np.pi):
      raise ValueError('Valid values for theta are between 0 and pi, '
                       'keep_theta_range={}'.format(p.keep_theta_range))

    if min_theta > max_theta:
      raise ValueError('min_theta must be <= max_theta, '
                       'keep_theta_range={}'.format(p.keep_theta_range))

    min_phi, max_phi = p.keep_phi_range
    if (min_phi < -2. * np.pi or min_phi > 2. * np.pi or
        max_phi < -2. * np.pi or max_phi > 2. * np.pi):
      raise ValueError('Valid values for phi are between -2*pi and 2*pi, '
                       'keep_phi_range={}'.format(p.keep_phi_range))

    if min_phi > max_phi:
      raise ValueError('min_phi must be <= max_phi, '
                       'keep_phi_range={}'.format(p.keep_phi_range))

    _, theta, phi = tf.unstack(
        geometry.SphericalCoordinatesTransform(points_xyz), axis=-1)

    # phi is returned in the range [-pi, pi]; we shift the values that are in
    # [-pi, 0] to [pi, 2pi] instead to make the logic below easier to follow.
    # Hence, all phi values after this will be in [0, 2pi].
    phi = tf.where(phi >= 0., phi, 2. * np.pi + phi)

    # Theta does not have circular boundary conditions; a simple check suffices.
    points_mask = (theta >= min_theta) & (theta <= max_theta)

    if min_phi < 0. and max_phi < 0.:
      # Both are less than zero; we just add 2pi and use the regular check.
      min_phi += 2. * np.pi
      max_phi += 2. * np.pi

    if min_phi < 0.:
      # The minimum threshold is below 0, so we split the check into the union
      # of (2*pi + min_phi to 2*pi) and (0 to max_phi). Note that min_phi is
      # negative, but phi is always positive, so 2*pi + min_phi gives the lower
      # bound of the appropriate values.
      points_mask &= (phi >= (2. 
* np.pi + min_phi)) | (phi <= max_phi)\n else:\n # Both must be greater than 0 if we get to this condition.\n assert min_phi >= 0.\n assert max_phi >= 0.\n points_mask &= (phi >= min_phi) & (phi <= max_phi)\n\n features.lasers.points_xyz = tf.boolean_mask(points_xyz, points_mask)\n features.lasers.points_feature = tf.boolean_mask(points_feature,\n points_mask)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass DropBoxesOutOfRange(Preprocessor):\n \"\"\"Drops boxes outside of pre-defined x/y/z ranges (boundaries inclusive).\n\n This preprocessor expects features to contain the following keys:\n - labels.bboxes_3d of shape [N, 7]\n - labels.bboxes_3d_mask of shape [N]\n\n Modifies the following features:\n - labels.bboxes_3d_mask to mask out any additional boxes.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('keep_x_range', (-np.inf, np.inf),\n 'Only boxes that have x coordinates within this range are kept.')\n p.Define('keep_y_range', (-np.inf, np.inf),\n 'Only boxes that have y coordinates within this range are kept.')\n p.Define('keep_z_range', (-np.inf, np.inf),\n 'Only boxes that have z coordinates within this range are kept.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n\n min_x, max_x = p.keep_x_range\n min_y, max_y = p.keep_y_range\n min_z, max_z = p.keep_z_range\n\n # Short-circuit if all ranges are set to -inf, inf.\n if (np.all(np.isneginf([min_x, min_y, min_z])) and\n np.all(np.isposinf([max_x, max_y, max_z]))):\n return features\n\n # For each bounding box, compute whether any of its extrema\n # fall outside of the range.\n bboxes_3d_corners = geometry.BBoxCorners(\n features.labels.bboxes_3d[tf.newaxis, ...])[0]\n bboxes_3d_corners = py_utils.HasShape(bboxes_3d_corners, [-1, 8, 3])\n\n min_bbox_x = tf.reduce_min(bboxes_3d_corners[:, :, 0], axis=-1)\n max_bbox_x = tf.reduce_max(bboxes_3d_corners[:, :, 0], axis=-1)\n\n min_bbox_y = tf.reduce_min(bboxes_3d_corners[:, :, 1], axis=-1)\n max_bbox_y = tf.reduce_max(bboxes_3d_corners[:, :, 1], axis=-1)\n\n min_bbox_z = tf.reduce_min(bboxes_3d_corners[:, :, 2], axis=-1)\n max_bbox_z = tf.reduce_max(bboxes_3d_corners[:, :, 2], axis=-1)\n\n mask = (\n tf.math.logical_and(min_bbox_x >= min_x, max_bbox_x <= max_x)\n & tf.math.logical_and(min_bbox_y >= min_y, max_bbox_y <= max_y)\n & tf.math.logical_and(min_bbox_z >= min_z, max_bbox_z <= max_z))\n\n max_num_boxes = py_utils.GetShape(features.labels.bboxes_3d_mask)\n mask = py_utils.HasShape(mask, max_num_boxes)\n\n features.labels.bboxes_3d_mask *= tf.cast(mask, tf.float32)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass PadLaserFeatures(Preprocessor):\n \"\"\"Pads laser features so that the dimensions are fixed.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, F]\n\n and optionally points_padding of shape [P] corresponding to the padding.\n if points_padding is None, then all points are considered valid.\n\n Modifies the following features:\n lasers.points_xyz and lasers.points_feature to add padding.\n Optionally also modifies lasers.points_label and lasers.points_bbox_id\n if they exist to add padding.\n Modifies/adds the following features:\n labels.points_padding of shape [P] representing the padding.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = 
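The phi wrap-around logic is easy to get wrong, so here is a self-contained NumPy sketch of the same computation (PhiKeepMask is a hypothetical helper): shift phi into [0, 2*pi], then express a partially negative keep-range such as the [-pi/4, pi/4] frontal view as a union of two positive intervals:

import numpy as np

def PhiKeepMask(phi, min_phi, max_phi):
  """Keep-mask for azimuth phi in [-pi, pi], handling a negative min_phi."""
  phi = np.where(phi >= 0., phi, 2. * np.pi + phi)  # Now in [0, 2*pi].
  if min_phi < 0. and max_phi < 0.:
    min_phi += 2. * np.pi
    max_phi += 2. * np.pi
  if min_phi < 0.:
    # Union of [2*pi + min_phi, 2*pi] and [0, max_phi].
    return (phi >= 2. * np.pi + min_phi) | (phi <= max_phi)
  return (phi >= min_phi) & (phi <= max_phi)

# The 90-degree frontal field of view from the docstring: [-pi/4, pi/4].
phi = np.array([0., np.pi / 8., -np.pi / 8., np.pi / 2.])
assert PhiKeepMask(phi, -np.pi / 4., np.pi / 4.).tolist() == [
    True, True, True, False]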
super().Params()\n p.Define('max_num_points', 128500,\n 'Max number of points to pad the points to.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n\n if 'points_padding' in features.lasers:\n points_mask = 1 - features.lasers.points_padding\n points_mask = tf.cast(points_mask, tf.bool)\n features.lasers = features.lasers.Transform(\n _GetApplyPointMaskFn(points_mask))\n\n npoints = tf.shape(features.lasers.points_xyz)[0]\n features.lasers.points_padding = tf.ones([npoints])\n\n shuffled_idx = tf.range(npoints)\n shuffled_idx = tf.random.shuffle(shuffled_idx, seed=p.random_seed)\n\n def _PadOrTrimFn(points_tensor):\n # Shuffle before trimming so we have a random sampling\n points_tensor = tf.gather(points_tensor, shuffled_idx)\n return py_utils.PadOrTrimTo(points_tensor, [p.max_num_points] +\n points_tensor.shape[1:].as_list())\n\n features.lasers = features.lasers.Transform(_PadOrTrimFn)\n features.lasers.points_padding = 1.0 - features.lasers.points_padding\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n\n def _TransformShape(points_shape):\n return tf.TensorShape([p.max_num_points] + points_shape[1:].as_list())\n\n shapes.lasers = shapes.lasers.Transform(_TransformShape)\n shapes.lasers.points_padding = tf.TensorShape([p.max_num_points])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.lasers.points_padding = tf.float32\n return dtypes\n\n\nclass WorldScaling(Preprocessor):\n \"\"\"Scale the world randomly as a form of data augmentation.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - labels.bboxes_3d of shape [L, 7]\n\n Modifies the following features:\n lasers.points_xyz, labels.bboxes_3d with the same scaling applied to both.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('scaling', None, 'The scaling range.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if p.scaling is None:\n raise ValueError('scaling needs to be specified, instead of None.')\n if len(p.scaling) != 2:\n raise ValueError('scaling needs to be a list of two elements.')\n\n def TransformFeatures(self, features):\n p = self.params\n scaling = tf.random.uniform((),\n minval=p.scaling[0],\n maxval=p.scaling[1],\n seed=p.random_seed,\n dtype=features.lasers.points_xyz.dtype)\n\n # Scale points [num_points, 3].\n features.lasers.points_xyz *= scaling\n\n # Scaling bboxes (location and dimensions).\n bboxes_xyz = features.labels.bboxes_3d[..., :3] * scaling\n bboxes_dims = features.labels.bboxes_3d[..., 3:6] * scaling\n bboxes_rot = features.labels.bboxes_3d[..., 6:]\n\n features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],\n axis=-1)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass RandomDropLaserPoints(Preprocessor):\n \"\"\"Randomly dropout laser points and the corresponding features.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, K]\n\n\n Modifies the following features:\n lasers.points_xyz, lasers.points_feature.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('keep_prob', 0.95, 'Probability for keeping points.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n num_points, _ = py_utils.GetShape(features.lasers.points_xyz)\n\n pts_keep_sample_prob = 
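The shuffle-then-PadOrTrimTo pattern in PadLaserFeatures above has a simple NumPy analogue (ShufflePadOrTrim is a hypothetical helper handling only the leading dimension; the real py_utils.PadOrTrimTo is more general). Shuffling before trimming is what makes the trim a uniform random subsample; the returned vector uses the final 1-means-padding convention:

import numpy as np

def ShufflePadOrTrim(points, max_num_points, rng):
  """Shuffle [P, ...] points, then zero-pad or trim to max_num_points rows."""
  points = points[rng.permutation(points.shape[0])]
  num_real = min(points.shape[0], max_num_points)
  out = np.zeros((max_num_points,) + points.shape[1:], dtype=points.dtype)
  out[:num_real] = points[:num_real]
  # 0 where the row holds a real point, 1 where it is padding.
  padding = np.ones(max_num_points, dtype=np.float32)
  padding[:num_real] = 0.
  return out, padding

rng = np.random.default_rng(0)
pts, pad = ShufflePadOrTrim(np.arange(12.).reshape(4, 3), 6, rng)
assert pts.shape == (6, 3) and pad.tolist() == [0., 0., 0., 0., 1., 1.]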
tf.random.uniform([num_points],\n minval=0,\n maxval=1,\n seed=p.random_seed)\n pts_keep_mask = pts_keep_sample_prob < p.keep_prob\n\n if 'points_padding' in features.lasers:\n # Update points_padding so that where pts_keep_mask is True,\n # points_padding remains 0.\n points_mask = 1 - features.lasers.points_padding\n points_mask *= tf.cast(pts_keep_mask, tf.float32)\n features.lasers.points_padding = 1 - points_mask\n else:\n features.lasers.points_xyz = tf.boolean_mask(features.lasers.points_xyz,\n pts_keep_mask)\n features.lasers.points_feature = tf.boolean_mask(\n features.lasers.points_feature, pts_keep_mask)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass RandomFlipY(Preprocessor):\n \"\"\"Flip the world along axis Y as a form of data augmentation.\n\n When there are leading dimensions, this will flip the boxes with the same\n transformation across all the frames. This is useful when the input is a\n sequence of frames from the same run segment.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [..., 3]\n - labels.bboxes_3d of shape [..., 7]\n\n Modifies the following features:\n lasers.points_xyz, labels.bboxes_3d with the same flipping applied to both.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('flip_probability', 0.5, 'Probability of flipping.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n threshold = 1. - p.flip_probability\n choice = tf.random.uniform(\n (), minval=0.0, maxval=1.0, seed=p.random_seed) >= threshold\n\n # Flip points\n points_xyz = features.lasers.points_xyz\n points_y = tf.where(choice, -points_xyz[..., 1:2], points_xyz[..., 1:2])\n features.lasers.points_xyz = tf.concat(\n [points_xyz[..., 0:1], points_y, points_xyz[..., 2:3]], axis=-1)\n\n # Flip boxes\n bboxes_xyz = features.labels.bboxes_3d[..., :3]\n bboxes_y = tf.where(choice, -bboxes_xyz[..., 1:2], bboxes_xyz[..., 1:2])\n bboxes_xyz = tf.concat(\n [bboxes_xyz[..., 0:1], bboxes_y, bboxes_xyz[..., 2:3]], axis=-1)\n # Compensate rotation.\n bboxes_dims = features.labels.bboxes_3d[..., 3:6]\n bboxes_rot = features.labels.bboxes_3d[..., 6:]\n bboxes_rot = tf.where(choice, geometry.WrapAngleRad(-bboxes_rot),\n bboxes_rot)\n features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],\n axis=-1)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass GlobalTranslateNoise(Preprocessor):\n \"\"\"Add global translation noise of xyz coordinates to points and boxes.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - labels.bboxes_3d of shape [L, 7]\n\n Modifies the following features:\n lasers.points_xyz, labels.bboxes_3d with the same\n random translation noise applied to both.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('noise_std', [0.2, 0.2, 0.2],\n 'Standard deviation of translation noise per axis.')\n return p\n\n def TransformFeatures(self, features):\n p = self.params\n # Use three different seeds but the same base seed so\n # that the values are different.\n base_seed = p.random_seed\n x_seed = base_seed\n y_seed = None if base_seed is None else base_seed + 1\n z_seed = None if base_seed is None else base_seed + 2\n random_translate_x = tf.random.normal((),\n mean=0.0,\n stddev=p.noise_std[0],\n seed=x_seed)\n 
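For intuition on the heading compensation in RandomFlipY above: mirroring across the x-z plane negates y coordinates, and a heading theta maps to -theta under that mirror (the real code additionally wraps the result with geometry.WrapAngleRad). A toy NumPy sketch with a hypothetical FlipY helper:

import numpy as np

def FlipY(points_xyz, bboxes_3d):
  """Mirror [N, 3] points and [L, 7] boxes (x, y, z, dx, dy, dz, heading)."""
  points_xyz = points_xyz * np.array([1., -1., 1.])
  bboxes_3d = bboxes_3d.copy()
  bboxes_3d[:, 1] *= -1.  # Box centers move with the points.
  bboxes_3d[:, 6] *= -1.  # Headings are negated by the mirror.
  return points_xyz, bboxes_3d

pts = np.array([[1., 2., 0.]])
boxes = np.array([[1., 2., 0., 1., 1., 1., 0.3]])
pts2, boxes2 = FlipY(pts, boxes)
assert pts2[0, 1] == -2. and boxes2[0, 6] == -0.3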
random_translate_y = tf.random.normal((),\n mean=0.0,\n stddev=p.noise_std[1],\n seed=y_seed)\n random_translate_z = tf.random.normal((),\n mean=0.0,\n stddev=p.noise_std[2],\n seed=z_seed)\n\n pose = tf.stack([\n random_translate_x, random_translate_y, random_translate_z, 0.0, 0.0,\n 0.0\n ],\n axis=0)\n\n # Translate points.\n points_xyz = features.lasers.points_xyz\n features.lasers.points_xyz = geometry.CoordinateTransform(points_xyz, pose)\n\n # Translate boxes\n bboxes_xyz = features.labels.bboxes_3d[..., :3]\n bboxes_xyz = geometry.CoordinateTransform(bboxes_xyz, pose)\n features.labels.bboxes_3d = tf.concat(\n [bboxes_xyz, features.labels.bboxes_3d[..., 3:]], axis=-1)\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass RandomBBoxTransform(Preprocessor):\n \"\"\"Randomly transform bounding boxes and the points inside them.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, K]\n - lasers.points_padding of shape [P]\n - labels.bboxes_3d of shape [L, 7]\n - labels.bboxes_3d_mask of shape [L]\n\n Modifies the following features:\n lasers.points_{xyz,feature,padding}, labels.bboxes_3d with the\n transformed bounding boxes and points.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'max_rotation', None,\n 'The rotation amount will be randomly picked from '\n '[-max_rotation, max_rotation).')\n # At the moment we don't use this because it can cause boxes to collide with\n # each other. We need to compute box intersections when deciding whether to\n # apply the translation jitter. Theoretically we should also do this for\n # rotation.\n p.Define('noise_std', [0.0, 0.0, 0.0],\n 'Standard deviation of translation noise per axis.')\n p.Define(\n 'max_scaling', None,\n 'When max_scaling is not none, delta parameters s_x, s_y, s_z are '\n 'drawn from [-max_scaling[i], max_scaling[i]] where i is in [0, 3].')\n p.Define(\n 'max_shearing', None,\n 'When max_shearing is not none, shearing parameters sh_x^y, sh_x^z, '\n 'sh_y^x, sh_y^z, sh_z^x, sh_z^y are drawn from '\n '[-max_shearing[i], max_shearing[i]], where i is in [0, 5].')\n p.Define(\n 'max_num_points_per_bbox', 16384,\n 'The maximum number of points that fall within a bounding box. 
'
        'Bounding boxes with more points than this value will '
        'have some points dropped.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if p.max_rotation is None:
      raise ValueError('max_rotation needs to be specified, instead of None.')
    if p.max_scaling is not None:
      if len(p.max_scaling) != 3:
        raise ValueError('max_scaling needs to be specified as either None or '
                         'a list of 3 floating point numbers, instead of {}.'
                         ''.format(p.max_scaling))
    if p.max_shearing is not None:
      if len(p.max_shearing) != 6:
        raise ValueError('max_shearing needs to be specified as either None or '
                         'a list of 6 floating point numbers, instead of {}.'
                         ''.format(p.max_shearing))

  def _Foreground(self, features, points_xyz, points_feature, real_bboxes_3d,
                  points_in_bbox_mask, rotation, translate_pose, transform_fn):
    """Extract and transform foreground points and features."""
    out_bbox_xyz, out_bbox_feature, out_bbox_mask = self._ForLoopBuffers(
        features)

    # Only iterate over the actual number of boxes in the scene.
    actual_num_bboxes = tf.reduce_sum(
        tf.cast(features.labels.bboxes_3d_mask, tf.int32))

    ret = py_utils.ForLoop(
        body=transform_fn,
        start=0,
        limit=actual_num_bboxes,
        delta=1,
        loop_state=py_utils.NestedMap(
            points_xyz=points_xyz,
            points_feature=points_feature,
            bboxes_3d=real_bboxes_3d,
            points_in_bbox_mask=points_in_bbox_mask,
            rotation=rotation,
            translate_pose=translate_pose,
            out_bbox_points=out_bbox_xyz,
            out_bbox_feature=out_bbox_feature,
            out_bbox_mask=out_bbox_mask))

    # Gather all of the transformed points and features.
    out_bbox_xyz = tf.reshape(ret.out_bbox_points, [-1, 3])
    num_features = features.lasers.points_feature.shape[-1]
    out_bbox_feature = tf.reshape(ret.out_bbox_feature, [-1, num_features])
    out_bbox_mask = tf.cast(tf.reshape(ret.out_bbox_mask, [-1]), tf.bool)
    fg_xyz = tf.boolean_mask(out_bbox_xyz, out_bbox_mask)
    fg_feature = tf.boolean_mask(out_bbox_feature, out_bbox_mask)
    return fg_xyz, fg_feature

  def _Background(self, points_xyz, points_feature, points_in_bbox_mask):
    # If a point is in any bounding box, it is a foreground point.
    foreground_points_mask = tf.reduce_any(points_in_bbox_mask, axis=-1)
    # All others are background. 
We rotate all of the foreground points to\n # final_points_* and keep the background points unchanged\n background_points_mask = tf.math.logical_not(foreground_points_mask)\n background_points_xyz = tf.boolean_mask(points_xyz, background_points_mask)\n background_points_feature = tf.boolean_mask(points_feature,\n background_points_mask)\n return background_points_xyz, background_points_feature\n\n def _ForLoopBuffers(self, features):\n \"\"\"Create and return the buffers for the for loop.\"\"\"\n p = self.params\n bboxes_3d = features.labels.bboxes_3d\n\n # Compute the shapes and create the buffers for the For loop.\n max_num_bboxes = tf.shape(bboxes_3d)[0]\n per_box_shape = [max_num_bboxes, p.max_num_points_per_bbox, 3]\n out_bbox_points = inplace_ops.empty(\n per_box_shape, dtype=tf.float32, init=True)\n\n num_features = features.lasers.points_feature.shape[-1]\n bbox_feature_shape = [\n max_num_bboxes, p.max_num_points_per_bbox, num_features\n ]\n out_bbox_feature = inplace_ops.empty(\n bbox_feature_shape, dtype=tf.float32, init=True)\n\n per_box_mask_shape = [max_num_bboxes, p.max_num_points_per_bbox]\n out_bbox_mask = inplace_ops.empty(\n per_box_mask_shape, dtype=tf.float32, init=True)\n\n return out_bbox_points, out_bbox_feature, out_bbox_mask\n\n def TransformFeatures(self, features):\n p = self.params\n\n num_features = features.lasers.points_feature.shape[-1]\n\n def Transform(i, state):\n \"\"\"Transform the points in bounding box `i`.\"\"\"\n state.points_xyz = tf.reshape(state.points_xyz, [-1, 3])\n bbox_mask = tf.reshape(state.points_in_bbox_mask[:, i], [-1])\n\n # Fetch only the points in the bounding box.\n points_xyz_masked = tf.boolean_mask(state.points_xyz, bbox_mask)\n points_feature_masked = tf.boolean_mask(state.points_feature, bbox_mask)\n\n num_points = tf.shape(points_xyz_masked)[0]\n\n # TODO(vrv): Fold the following into a single transformation\n # matrix.\n #\n # Translate the box to the origin, then rotate the desired\n # rotation angle.\n translation_vec = state.bboxes_3d[i, 0:3]\n rotation_vec = [state.rotation[i], 0., 0.]\n pose = tf.concat([-translation_vec, rotation_vec], axis=0)\n points_xyz_adj = geometry.CoordinateTransform(points_xyz_masked, pose)\n if p.max_scaling is not None or p.max_shearing is not None:\n # Translate the points in the bounding box by moving dz/2 so that the\n # bottom of the bounding box is at Z = 0 when any of the two\n # (max_scaling or max_shearing) is not None\n translation_scale_or_shear = tf.stack(\n [0., 0., state.bboxes_3d[i, 5] / 2], axis=0)\n pose1 = tf.concat([translation_scale_or_shear, [0., 0., 0.]], axis=0)\n points_xyz_adj = geometry.CoordinateTransform(points_xyz_adj, pose1)\n else:\n translation_scale_or_shear = tf.stack([0., 0., 0.], axis=0)\n\n if p.max_scaling is not None:\n # Perform scaling to the point cloud\n # Scaling matrix\n # [[s_x+1 0 0]\n # [ 0 s_y+1 0]\n # [ 0 0 s_z+1]]\n sx = tf.random.uniform([],\n minval=-p.max_scaling[0],\n maxval=p.max_scaling[0],\n seed=p.random_seed)\n sy = tf.random.uniform([],\n minval=-p.max_scaling[1],\n maxval=p.max_scaling[1],\n seed=p.random_seed)\n sz = tf.random.uniform([],\n minval=-p.max_scaling[2],\n maxval=p.max_scaling[2],\n seed=p.random_seed)\n scaling_matrix = tf.stack(\n [[sx + 1., 0., 0.], [0., sy + 1., 0.], [0., 0., sz + 1.]], axis=0)\n\n points_xyz_adj = tf.einsum('ij,kj->ki', scaling_matrix, points_xyz_adj)\n\n if p.max_shearing is not None:\n # Perform shearing to the point cloud\n # Shearing matrix\n # [[1 sh_x^y sh_x^z]\n # [sh_y^x 1 sh_y^z]\n # 
[sh_z^x sh_z^y 1 ]]\n sxy = tf.random.uniform([],\n minval=-p.max_shearing[0],\n maxval=p.max_shearing[0],\n seed=p.random_seed)\n sxz = tf.random.uniform([],\n minval=-p.max_shearing[1],\n maxval=p.max_shearing[1],\n seed=p.random_seed)\n syx = tf.random.uniform([],\n minval=-p.max_shearing[2],\n maxval=p.max_shearing[2],\n seed=p.random_seed)\n syz = tf.random.uniform([],\n minval=-p.max_shearing[3],\n maxval=p.max_shearing[3],\n seed=p.random_seed)\n szx = tf.random.uniform([],\n minval=-p.max_shearing[4],\n maxval=p.max_shearing[4],\n seed=p.random_seed)\n szy = tf.random.uniform([],\n minval=-p.max_shearing[5],\n maxval=p.max_shearing[5],\n seed=p.random_seed)\n shearing_matrix = tf.stack(\n [[1., sxy, sxz], [syx, 1., syz], [szx, szy, 1.]], axis=0)\n points_xyz_adj = tf.einsum('ij,kj->ki', shearing_matrix, points_xyz_adj)\n\n # Translate the points back, adding noise if needed.\n translation_with_noise = (\n translation_vec - translation_scale_or_shear +\n state.translate_pose[i])\n pose2 = tf.concat([translation_with_noise, [0., 0., 0.]], axis=0)\n final_points_xyz = geometry.CoordinateTransform(points_xyz_adj, pose2)\n\n # final_points_xyz is an [M, 3] Tensor where M is the number of points in\n # the box.\n points_mask = tf.ones([num_points], dtype=tf.float32)\n\n final_points_xyz = py_utils.PadOrTrimTo(final_points_xyz,\n [p.max_num_points_per_bbox, 3])\n final_points_feature = py_utils.PadOrTrimTo(\n points_feature_masked, [p.max_num_points_per_bbox, num_features])\n points_mask = py_utils.PadOrTrimTo(points_mask,\n [p.max_num_points_per_bbox])\n state.out_bbox_points = inplace_ops.alias_inplace_update(\n state.out_bbox_points, [i], tf.expand_dims(final_points_xyz, 0))\n state.out_bbox_feature = inplace_ops.alias_inplace_update(\n state.out_bbox_feature, [i], tf.expand_dims(final_points_feature, 0))\n state.out_bbox_mask = inplace_ops.alias_inplace_update(\n state.out_bbox_mask, [i], tf.expand_dims(points_mask, 0))\n\n return state\n\n # Get the points and features that reside in boxes.\n if 'points_padding' in features.lasers:\n points_mask = 1 - features.lasers.points_padding\n points_xyz = tf.boolean_mask(features.lasers.points_xyz, points_mask)\n points_feature = tf.boolean_mask(features.lasers.points_feature,\n points_mask)\n else:\n points_xyz = features.lasers.points_xyz\n points_feature = features.lasers.points_feature\n\n # Fetch real bounding boxes and compute point mask.\n real_bboxes_3d = tf.boolean_mask(features.labels.bboxes_3d,\n features.labels.bboxes_3d_mask)\n points_in_bbox_mask = geometry.IsWithinBBox3D(points_xyz, real_bboxes_3d)\n\n # Choose a random rotation for every real box.\n num_boxes = tf.shape(real_bboxes_3d)[0]\n rotation = tf.random.uniform([num_boxes],\n minval=-p.max_rotation,\n maxval=p.max_rotation,\n seed=p.random_seed)\n\n base_seed = p.random_seed\n x_seed = base_seed\n y_seed = None if base_seed is None else base_seed + 1\n z_seed = None if base_seed is None else base_seed + 2\n random_translate_x = tf.random.normal([num_boxes],\n mean=0.0,\n stddev=p.noise_std[0],\n seed=x_seed)\n random_translate_y = tf.random.normal([num_boxes],\n mean=0.0,\n stddev=p.noise_std[1],\n seed=y_seed)\n random_translate_z = tf.random.normal([num_boxes],\n mean=0.0,\n stddev=p.noise_std[2],\n seed=z_seed)\n\n translate_pose = tf.stack(\n [random_translate_x, random_translate_y, random_translate_z], axis=1)\n\n fg_xyz, fg_feature = self._Foreground(features, points_xyz, points_feature,\n real_bboxes_3d, points_in_bbox_mask,\n rotation, translate_pose, 
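The einsum pattern 'ij,kj->ki' used for both the scaling and shearing matrices above is just a row-wise matrix-vector product, i.e. points @ matrix.T. A small NumPy check with arbitrary shear values (the six sh values are illustrative, not drawn from the parameter ranges):

import numpy as np

sxy, sxz, syx, syz, szx, szy = 0.1, 0.0, 0.0, 0.2, 0.0, 0.0
shearing_matrix = np.array([[1., sxy, sxz],
                            [syx, 1., syz],
                            [szx, szy, 1.]])
points = np.array([[1., 2., 3.]])

sheared = np.einsum('ij,kj->ki', shearing_matrix, points)
# The same product written as an explicit matmul with the transpose.
assert np.allclose(sheared, points @ shearing_matrix.T)
assert np.allclose(sheared[0], [1. + 0.1 * 2., 2. + 0.2 * 3., 3.])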
Transform)

    # Concatenate them with the background points and features.
    bg_xyz, bg_feature = self._Background(points_xyz, points_feature,
                                          points_in_bbox_mask)
    all_points = tf.concat([bg_xyz, fg_xyz], axis=0)
    all_features = tf.concat([bg_feature, fg_feature], axis=0)

    # Shuffle the points/features randomly.
    all_points, all_features = _ConsistentShuffle((all_points, all_features),
                                                  p.random_seed)

    # Padding should technically be unnecessary: the number of points before and
    # after should be the same, but in practice we sometimes seem to drop a few
    # points, and so we pad to make the shape fixed.
    #
    # TODO(vrv): Identify the source of this problem and then assert a shape
    # matching check.
    if 'points_padding' in features.lasers:
      features.lasers.points_xyz = py_utils.PadOrTrimTo(
          all_points, tf.shape(features.lasers.points_xyz))
      features.lasers.points_feature = py_utils.PadOrTrimTo(
          all_features, tf.shape(features.lasers.points_feature))
      total_points = tf.shape(all_points)[0]
      features.lasers.points_padding = 1.0 - py_utils.PadOrTrimTo(
          tf.ones([total_points]), tf.shape(features.lasers.points_padding))
    else:
      features.lasers.points_xyz = all_points
      features.lasers.points_feature = all_features

    # Translate noise.
    bboxes_xyz = real_bboxes_3d[..., :3]
    bboxes_xyz += translate_pose[..., :3]

    bboxes_dim = real_bboxes_3d[..., 3:6]
    # Rotate bboxes by their corresponding rotation.
    bboxes_rot = real_bboxes_3d[..., 6:]
    bboxes_rot -= rotation[:, tf.newaxis]
    features.labels.bboxes_3d = py_utils.PadOrTrimTo(
        tf.concat([bboxes_xyz, bboxes_dim, bboxes_rot], axis=-1),
        tf.shape(features.labels.bboxes_3d))
    features.labels.bboxes_3d_mask = py_utils.PadOrTrimTo(
        tf.ones(tf.shape(real_bboxes_3d)[0]),
        tf.shape(features.labels.bboxes_3d_mask))
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes


class GroundTruthAugmentor(Preprocessor):
  """Augment bounding box labels and points from a database.

  This preprocessor expects features to contain the following keys:
    lasers.points_xyz of shape [P, 3]

    lasers.points_feature of shape [P, K]

    lasers.points_padding of shape [P]

    labels.bboxes_3d of shape [L, 7]

    labels.bboxes_3d_mask of shape [L]

    labels.labels of shape [L]

  Modifies the above features so that additional objects from
  a groundtruth database are added.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'groundtruth_database', None,
        'If not None, loads groundtruths from this database and adds '
        'them to the current scene. Groundtruth database is expected '
        'to be a TFRecord of KITTI or Waymo crops.')
    p.Define(
        'num_db_objects', None,
        'Number of objects in the database. Because we use TFRecord '
        'we cannot easily query the number of objects efficiently.')
    p.Define('max_num_points_per_bbox', 2048,
             'Maximum number of points in each bbox to augment with.')
    p.Define(
        'filter_min_points', 0,
        'Minimum number of points each database object must have '
        'to be included in an example.')
    p.Define(
        'filter_max_points', None,
        'Maximum number of points each database object must have '
        'to be included in an example.')
    p.Define(
        'difficulty_sampling_probability', None,
        'Probability for sampling a ground truth example whose difficulty '
        'equals {0, 1, 2, 3, ...}. Example: [1.0, 1.0, 1.0, 1.0] for '
        'uniform sampling of 4 different difficulties. 
Default value is '
        'None = uniform sampling for all difficulties.')
    p.Define(
        'class_sampling_probability', None,
        'Probability for sampling a ground truth example based on its class '
        'index. Example: For KITTI, classes are [Background, Car, Van, Truck, '
        'Pedestrian, Person_sitting, Cyclist, Tram, Misc, DontCare]; using '
        'probability vector [0., 1.0, 1.0, 0., 0., 0., 0., 0., 0., 0.], we '
        'uniformly sample Car and Van. Default value is None: Uses '
        'label_filter flag and does not sample based on class.')
    p.Define('filter_min_difficulty', 0,
             'Filter ground truth boxes whose difficulty is < this value.')
    p.Define('max_augmented_bboxes', 15,
             'Maximum number of augmented bounding boxes per scene.')
    p.Define(
        'label_filter', [],
        'A list where, if specified, only examples of these label integers '
        'will be included in an example.')
    p.Define(
        'batch_mode', False, 'Bool value to control whether the whole '
        'groundtruth database is loaded or partially loaded to save memory '
        'usage. Setting to False loads the whole ground truth database into '
        'memory. Otherwise, only a fraction of the data will be loaded into '
        'the memory.')
    return p

  def _ReadDB(self, file_patterns):
    """Read the groundtruth database and return as a NestedMap of Tensors."""
    p = self.params

    def Process(record):
      """Process a groundtruth record."""
      feature_map = {
          'num_points': tf.io.FixedLenFeature((), tf.int64, 0),
          'points': tf.io.VarLenFeature(dtype=tf.float32),
          'points_feature': tf.io.VarLenFeature(dtype=tf.float32),
          'bbox_3d': tf.io.VarLenFeature(dtype=tf.float32),
          'label': tf.io.FixedLenFeature((), tf.int64, 0),
          'difficulty': tf.io.FixedLenFeature((), tf.int64, 0),
          'text': tf.io.VarLenFeature(dtype=tf.string),
      }

      example_data = tf.io.parse_single_example(record, feature_map)
      num_points = example_data['num_points']

      points = tf.reshape(_Dense(example_data['points']), [num_points, 3])
      features = tf.reshape(
          _Dense(example_data['points_feature']), [num_points, 1])
      points_mask = tf.ones(num_points, dtype=tf.bool)

      # TODO(vrv): Use random selection instead of first N points.
      points = py_utils.PadOrTrimTo(points, [p.max_num_points_per_bbox, 3])
      features = py_utils.PadOrTrimTo(features, [p.max_num_points_per_bbox, 1])
      points_mask = py_utils.PadOrTrimTo(points_mask,
                                         [p.max_num_points_per_bbox])

      bboxes_3d = tf.reshape(_Dense(example_data['bbox_3d']), [7])
      label = tf.cast(example_data['label'], tf.int32)
      difficulty = tf.cast(example_data['difficulty'], tf.int32)
      return (points, features, points_mask, bboxes_3d, label, difficulty)

    if p.batch_mode:
      # Prepare dataset for ground truth bounding boxes. 
Randomly shuffle the\n # file patterns.\n file_count = len(tf.io.gfile.glob(file_patterns))\n dataset = tf.stateless_list_files(file_patterns)\n dataset = dataset.apply(tf.stateless_cache_dataset())\n dataset = dataset.apply(\n tf.stateless_shuffle_dataset(\n buffer_size=file_count, reshuffle_each_iteration=True))\n dataset = dataset.interleave(\n tf.data.TFRecordDataset, cycle_length=10, num_parallel_calls=10)\n dataset = dataset.repeat()\n # Only prefetch a few objects from the database to reduce memory\n # consumption.\n dataset = dataset.map(Process, num_parallel_calls=10)\n # We need more bboxes than max_augmented_bboxes in a batch, because some\n # of the boxes are filtered out.\n dataset = dataset.batch(p.max_augmented_bboxes * 10)\n dataset = dataset.apply(tf.stateless_cache_dataset()).prefetch(\n p.max_augmented_bboxes * 30)\n else:\n # Prepare dataset for ground truth bounding boxes.\n dataset = tf.stateless_list_files(file_patterns)\n dataset = dataset.interleave(\n tf.data.TFRecordDataset, cycle_length=10, num_parallel_calls=10)\n # Read the entire dataset into memory.\n dataset = dataset.take(p.num_db_objects)\n dataset = dataset.map(Process, num_parallel_calls=10)\n # We batch the output of the dataset into a very large Tensor, then cache\n # it in memory.\n dataset = dataset.batch(p.num_db_objects)\n dataset = dataset.apply(tf.stateless_cache_dataset()).repeat()\n\n iterator = dataset.make_one_shot_iterator()\n input_batch = iterator.get_next()\n\n (db_points_xyz, db_points_feature, db_points_mask, db_bboxes, db_labels,\n db_difficulties) = input_batch\n return py_utils.NestedMap(\n points_xyz=db_points_xyz,\n points_feature=db_points_feature,\n points_mask=db_points_mask,\n bboxes_3d=db_bboxes,\n labels=db_labels,\n difficulties=db_difficulties)\n\n def _CreateExampleFilter(self, db):\n \"\"\"Construct db example filter.\n\n Args:\n db: NestedMap of the following Tensors: points_mask - [N, P] - The points\n mask for every object in the database, where N is the number of objects\n and P is the maximum number of points per object. labels - [N] - int32\n Label for each object in the database. 
difficulties - [N] - int32
        Difficulty for each label in the database.

    Returns:
      A [N] boolean Tensor for each object in the database, True if
      that corresponding object passes the filter.
    """
    p = self.params
    db_points_mask = db.points_mask
    db_label = db.labels
    db_difficulty = db.difficulties

    num_objects_in_database = tf.shape(db_points_mask)[0]

    # Filter objects by number of points.
    points_per_object = tf.reduce_sum(tf.cast(db_points_mask, tf.int32), axis=1)
    example_filter = points_per_object >= p.filter_min_points
    if p.filter_max_points:
      example_filter = tf.math.logical_and(
          example_filter, points_per_object <= p.filter_max_points)

    if p.difficulty_sampling_probability is not None:
      # Sample db based on difficulty of each example.
      sampling_prob = p.difficulty_sampling_probability
      db_difficulty_probability = tf.zeros_like(db_difficulty, dtype=tf.float32)
      for difficulty_idx, difficulty_prob in enumerate(sampling_prob):
        db_difficulty_probability += (
            tf.cast(tf.equal(db_difficulty, difficulty_idx), tf.float32) *
            difficulty_prob)

      sampled_filter = tf.random.uniform(
          tf.shape(example_filter),
          minval=0,
          maxval=1,
          dtype=tf.float32,
          seed=p.random_seed)
      sampled_filter = sampled_filter < db_difficulty_probability
      example_filter &= sampled_filter
    else:
      # Filter out db examples below the minimum difficulty.
      example_filter = tf.math.logical_and(
          example_filter, db_difficulty >= p.filter_min_difficulty)

    example_filter = tf.reshape(example_filter, [num_objects_in_database])
    db_label = tf.reshape(db_label, [num_objects_in_database])
    if p.class_sampling_probability is not None:
      # Sample example based on its class probability.
      sampling_prob = p.class_sampling_probability
      db_class_probability = tf.zeros_like(db_label, dtype=tf.float32)

      for class_idx, class_prob in enumerate(sampling_prob):
        db_class_probability += (
            tf.cast(tf.equal(db_label, class_idx), tf.float32) * class_prob)

      sampled_filter = tf.random.uniform(
          tf.shape(example_filter),
          minval=0,
          maxval=1,
          dtype=tf.float32,
          seed=p.random_seed)
      sampled_filter = sampled_filter < db_class_probability
      example_filter &= sampled_filter
    elif p.label_filter:
      # Filter based on labels: only labels listed in p.label_filter pass.
      valid_labels = tf.constant(p.label_filter)
      label_mask = tf.reduce_any(
          tf.equal(db_label[..., tf.newaxis], valid_labels), axis=1)
      example_filter = tf.math.logical_and(example_filter, label_mask)
    return example_filter

  # TODO(vrv): Create an overlap filter that also ensures that boxes don't
  # overlap with groundtruth points, so that the scenes are more plausible.
  def _FilterIndices(self, gt_bboxes_3d, db_bboxes, db_idx):
    """Identify database boxes that don't overlap with other boxes."""
    # We accomplish overlap filtering by first computing the pairwise 3D IoU of
    # all boxes (concatenated) as a way of computing pairwise box overlaps.
    num_gt_bboxes = tf.shape(gt_bboxes_3d)[0]
    filtered_bboxes = tf.gather(db_bboxes, db_idx)
    all_bboxes = tf.concat([gt_bboxes_3d, filtered_bboxes], axis=0)
    pairwise_overlap = ops.pairwise_iou3d(all_bboxes, all_bboxes)

    # We now have an M x M matrix with 1s on the diagonal and non-zero entries
    # whenever a box collides with another.
    #
    # To increase the number of boxes selected, we filter the upper triangular
    # entries so that the boxes are chosen greedily: boxes with smaller indices
    # will be selected before later boxes, because earlier boxes will not appear
    # 
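The difficulty- and class-based sampling above is a categorical probability lookup followed by a uniform draw. A compact NumPy sketch of the same idea (SampleByDifficulty and the probability vector [1.0, 0.5, 0.25, 0.25] are illustrative, not library values):

import numpy as np

def SampleByDifficulty(difficulties, sampling_prob, rng):
  """Keep each example with probability sampling_prob[difficulty]."""
  keep_prob = np.zeros(difficulties.shape, dtype=np.float32)
  for difficulty_idx, difficulty_prob in enumerate(sampling_prob):
    keep_prob += (difficulties == difficulty_idx) * difficulty_prob
  return rng.uniform(size=difficulties.shape) < keep_prob

rng = np.random.default_rng(0)
difficulties = np.array([0, 1, 2, 3, 0])
kept = SampleByDifficulty(difficulties, [1.0, 0.5, 0.25, 0.25], rng)
# Difficulty-0 examples are always kept since their probability is 1.0.
assert kept[0] and kept[4]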
to collide with later boxes, but later boxes may collide with earlier\n # ones.\n pairwise_overlap = tf.linalg.band_part(pairwise_overlap, -1, 0)\n\n # We compute the sum of the IoU overlaps for all database boxes.\n db_overlap_sums = tf.reduce_sum(pairwise_overlap[num_gt_bboxes:], axis=1)\n\n # Those boxes that don't overlap with any other boxes will only have\n # a 1.0 IoU with itself.\n non_overlapping_boxes = tf.reshape(db_overlap_sums <= 1., [-1])\n\n # Filter to select only those object ids that pass this filter.\n db_idx = tf.boolean_mask(db_idx, non_overlapping_boxes)\n return db_idx\n\n def TransformFeatures(self, features):\n p = self.params\n\n tf.logging.info('Loading groundtruth database at %s' %\n (p.groundtruth_database))\n db = p.groundtruth_database.Instantiate().BuildDataSource(self._ReadDB).data\n\n original_features_shape = tf.shape(features.lasers.points_feature)\n\n # Compute the number of bboxes to augment.\n num_bboxes_in_scene = tf.reduce_sum(\n tf.cast(features.labels.bboxes_3d_mask, tf.int32))\n max_bboxes = tf.shape(features.labels.bboxes_3d_mask)[0]\n num_augmented_bboxes = tf.minimum(max_bboxes - num_bboxes_in_scene,\n p.max_augmented_bboxes)\n\n # Compute an object index over all objects in the database.\n num_objects_in_database = tf.shape(db.points_xyz)[0]\n db_idx = tf.range(num_objects_in_database)\n\n # Find those indices whose examples pass the filters, and select only those\n # indices.\n example_filter = self._CreateExampleFilter(db)\n db_idx = tf.boolean_mask(db_idx, example_filter)\n\n # At this point, we might still have a large number of object candidates,\n # from which we only need a sample.\n # To reduce the amount of computation, we randomly subsample to slightly\n # more than we want to augment.\n db_idx = tf.random.shuffle(\n db_idx, seed=p.random_seed)[0:num_augmented_bboxes * 5]\n\n # After filtering, further filter out the db boxes that would occlude with\n # other boxes (including other database boxes).\n #\n # Gather the filtered ground truth bounding boxes according to the mask, so\n # we can compute overlaps below.\n gt_bboxes_3d_mask = tf.cast(features.labels.bboxes_3d_mask, tf.bool)\n gt_bboxes_3d = tf.boolean_mask(features.labels.bboxes_3d, gt_bboxes_3d_mask)\n gt_bboxes_3d = py_utils.HasShape(gt_bboxes_3d, [num_bboxes_in_scene, 7])\n db_idx = self._FilterIndices(gt_bboxes_3d, db.bboxes_3d, db_idx)\n\n # From the filtered object ids, select only as many boxes as we need.\n shuffled_idx = db_idx[0:num_augmented_bboxes]\n num_augmented_bboxes = tf.shape(shuffled_idx)[0]\n\n # Gather based off the indices.\n sampled_points_xyz = tf.gather(db.points_xyz, shuffled_idx)\n sampled_points_feature = tf.gather(db.points_feature, shuffled_idx)\n sampled_mask = tf.reshape(\n tf.gather(db.points_mask, shuffled_idx),\n [num_augmented_bboxes, p.max_num_points_per_bbox])\n sampled_bboxes = tf.gather(db.bboxes_3d, shuffled_idx)\n sampled_labels = tf.gather(db.labels, shuffled_idx)\n\n # Mask points/features.\n sampled_points_xyz = tf.boolean_mask(sampled_points_xyz, sampled_mask)\n sampled_points_feature = tf.boolean_mask(sampled_points_feature,\n sampled_mask)\n\n # Flatten before concatenation with ground truths.\n sampled_points_xyz = tf.reshape(sampled_points_xyz, [-1, 3])\n sampled_points_feature = tf.reshape(sampled_points_feature,\n [-1, original_features_shape[-1]])\n sampled_bboxes = tf.reshape(sampled_bboxes, [-1, 7])\n\n # Concatenate the samples with the ground truths.\n if 'points_padding' in features.lasers:\n points_mask = tf.cast(1. 
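tf.linalg.band_part(x, -1, 0) keeps the lower triangle, so each box is only "charged" for overlap with earlier boxes; a database box whose row sums to exactly its self-IoU of 1.0 survives. A NumPy sketch of the same greedy selection with a hand-made pairwise-overlap matrix for [gt, db1, db2]:

import numpy as np

# Pairwise IoU: db1 overlaps the gt box, db2 overlaps nothing.
pairwise_overlap = np.array([[1.0, 0.3, 0.0],
                             [0.3, 1.0, 0.0],
                             [0.0, 0.0, 1.0]])
num_gt_bboxes = 1

# Lower triangle only: later boxes are charged for collisions with earlier
# boxes, mirroring tf.linalg.band_part(pairwise_overlap, -1, 0).
lower = np.tril(pairwise_overlap)
db_overlap_sums = lower[num_gt_bboxes:].sum(axis=1)

# A database box is kept iff its only overlap is the 1.0 with itself.
non_overlapping = db_overlap_sums <= 1.
assert non_overlapping.tolist() == [False, True]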
- features.lasers.points_padding, tf.bool)
      # Densify the original points.
      dense_points_xyz = tf.boolean_mask(features.lasers.points_xyz,
                                         points_mask)
      dense_points_feature = tf.boolean_mask(features.lasers.points_feature,
                                             points_mask)

      # Concatenate the dense original points with our new sampled points.
      points_xyz = tf.concat([dense_points_xyz, sampled_points_xyz], axis=0)
      points_feature = tf.concat([dense_points_feature, sampled_points_feature],
                                 axis=0)
      original_points_shape = tf.shape(features.lasers.points_xyz)
      features.lasers.points_xyz = py_utils.PadOrTrimTo(points_xyz,
                                                        original_points_shape)
      features.lasers.points_feature = py_utils.PadOrTrimTo(
          points_feature, original_features_shape)
      # Compute the modified mask / padding.
      final_points_mask = py_utils.PadOrTrimTo(
          tf.ones(tf.shape(points_xyz)[0]),
          tf.shape(features.lasers.points_padding))
      features.lasers.points_padding = 1. - final_points_mask
    else:
      points_xyz = tf.concat([features.lasers.points_xyz, sampled_points_xyz],
                             axis=0)
      points_feature = tf.concat(
          [features.lasers.points_feature, sampled_points_feature], axis=0)
      features.lasers.points_xyz = points_xyz
      features.lasers.points_feature = points_feature

    # Reconstruct a new, dense, bboxes_3d vector that includes the filtered
    # groundtruth bounding boxes followed by the database augmented boxes.
    bboxes_3d = tf.concat([gt_bboxes_3d, sampled_bboxes], axis=0)
    bboxes_3d = py_utils.PadOrTrimTo(bboxes_3d, [max_bboxes, 7])
    features.labels.bboxes_3d = bboxes_3d
    bboxes_3d_mask = tf.ones(
        num_bboxes_in_scene + num_augmented_bboxes, dtype=tf.float32)
    features.labels.bboxes_3d_mask = py_utils.PadOrTrimTo(
        bboxes_3d_mask, [max_bboxes])

    gt_labels = tf.boolean_mask(features.labels.labels, gt_bboxes_3d_mask)
    gt_labels = py_utils.HasShape(gt_labels, [num_bboxes_in_scene])

    labels = tf.concat([gt_labels, sampled_labels], axis=0)
    features.labels.labels = py_utils.PadOrTrimTo(labels, [max_bboxes])

    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes


class FrustumDropout(Preprocessor):
  """Randomly drops out points in a frustum.

  All points are first converted to spherical coordinates, and then a point
  is randomly selected. All points in the frustum around that point within
  a given phi, theta angle width and with distance to the origin greater than
  a given value are dropped with probability = 1 - keep_prob.

  Here, we can specify whether the dropped frustum is the union or intersection
  of the phi and theta angle filters.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, K]

  Optionally points_padding of shape [P] corresponding to the padding.
  If points_padding is None, then all points are considered valid.

  Modifies the following features:
    lasers.points_xyz, lasers.points_feature, lasers.points_padding with points
    randomly dropped out.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('theta_width', 0.03, 'Theta angle width for dropping points.')
    p.Define('phi_width', 0.0, 'Phi angle width for dropping points.')
    p.Define(
        'distance', 0.0, 'Drop points that have larger distance to the '
        'origin than the value given here.')
    p.Define(
        'keep_prob', 0.0, 'keep_prob: 1. 
= drop no points in the Frustum,'\n '0 = drop all points, between 0 and 1 = down sample the points.')\n p.Define(\n 'drop_type', 'union', 'Drop either the union or intersection of '\n 'phi width and theta width.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if p.phi_width < 0:\n raise ValueError('phi_width must be >= 0, phi_width={}'.format(\n p.phi_width))\n if p.theta_width < 0:\n raise ValueError('theta_width must be >= 0, theta_width={}'.format(\n p.theta_width))\n if p.distance < 0:\n raise ValueError('distance must be >= 0, distance={}'.format(p.distance))\n if p.keep_prob < 0 or p.keep_prob > 1:\n raise ValueError('keep_prob must be >= 0 and <=1, keep_prob={}'.format(\n p.keep_prob))\n if p.drop_type not in ['union', 'intersection']:\n raise ValueError('drop_type must be union or intersection ,'\n 'drop_type={}'.format(p.drop_type))\n\n def TransformFeatures(self, features):\n p = self.params\n points_xyz = features.lasers.points_xyz\n points_feature = features.lasers.points_feature\n if 'points_padding' in features.lasers:\n points_padding = features.lasers.points_padding\n else:\n points_padding = None\n\n if points_padding is not None:\n points_mask = tf.cast(1 - points_padding, tf.bool)\n num_total_points = py_utils.GetShape(points_mask)[0]\n real_points_idx = tf.boolean_mask(\n tf.range(0, num_total_points, dtype=tf.int32), points_mask)\n num_points = py_utils.GetShape(real_points_idx)[0]\n else:\n points_mask = tf.ones_like(points_xyz[:, 0], dtype=tf.bool)\n num_total_points = py_utils.GetShape(points_mask)[0]\n num_points = py_utils.GetShape(points_xyz)[0]\n\n r, theta, phi = tf.unstack(\n geometry.SphericalCoordinatesTransform(points_xyz), axis=-1)\n\n def _PickRandomPoint():\n point_idx = tf.random.uniform((),\n minval=0,\n maxval=num_points,\n dtype=tf.int32)\n if points_padding is not None:\n point_idx = real_points_idx[point_idx]\n return point_idx\n\n # Pick a point at random and drop all points that are near that point in the\n # frustum for distance larger than r; repeat this for both theta and phi.\n if p.theta_width > 0:\n theta_half_width = p.theta_width / 2.\n point_idx = _PickRandomPoint()\n # Points within theta width and further than distance will be dropped.\n theta_drop_filter = ((theta < (theta[point_idx] + theta_half_width)) &\n (theta > (theta[point_idx] - theta_half_width)) &\n (r > p.distance))\n else:\n theta_drop_filter = tf.zeros_like(points_mask, dtype=tf.bool)\n\n if p.phi_width > 0:\n phi_half_width = p.phi_width / 2.\n point_idx = _PickRandomPoint()\n # Points within phi width and further than distance will be dropped.\n phi_drop_filter = ((phi < (phi[point_idx] + phi_half_width)) &\n (phi >\n (phi[point_idx] - phi_half_width)) & (r > p.distance))\n else:\n phi_drop_filter = tf.zeros_like(points_mask, dtype=tf.bool)\n\n # Create drop_filter by combining filters. This contains a filter for the\n # points to be removed. 
One can use the intersection method to limit the
    # dropped points to be within both phi and theta ranges.
    if p.drop_type == 'union':
      drop_filter = theta_drop_filter | phi_drop_filter
    elif p.drop_type == 'intersection':
      drop_filter = theta_drop_filter & phi_drop_filter

    if p.keep_prob == 0:
      # Drop all points in drop_filter.
      down_sampling_filter = drop_filter
    else:
      # Randomly drop points in drop_filter based on keep_prob.
      sampling_drop_filter = tf.random.uniform([num_total_points],
                                               minval=0,
                                               maxval=1,
                                               dtype=tf.float32)
      # Points greater than the threshold (keep_prob) will be dropped.
      sampling_drop_filter = sampling_drop_filter > p.keep_prob

      # Instead of dropping all points in the frustum, we drop out points
      # that are in the selected frustum (drop_filter).
      down_sampling_filter = drop_filter & sampling_drop_filter

    points_mask &= ~down_sampling_filter

    if points_padding is not None:
      features.lasers.points_padding = 1 - tf.cast(points_mask, tf.float32)
    else:
      features.lasers.points_xyz = tf.boolean_mask(points_xyz, points_mask)
      features.lasers.points_feature = tf.boolean_mask(points_feature,
                                                       points_mask)

    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes


class RepeatPreprocessor(Preprocessor):
  """Repeat a preprocessor multiple times.

  This preprocessor takes a preprocessor as a subprocessor and applies the
  subprocessor to features multiple times (repeat_count).
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('repeat_count', 1, 'Number of times the subprocessor is applied to'
             ' features.')
    p.Define('subprocessor', None, 'One of the input preprocessors.')

    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if p.subprocessor is None:
      raise ValueError('No subprocessor was specified for RepeatPreprocessor.')
    if p.repeat_count < 0 or not isinstance(p.repeat_count, int):
      raise ValueError(
          'repeat_count must be >= 0 and int, repeat_count={}'.format(
              p.repeat_count))

    self.CreateChild('subprocessor', p.subprocessor)

  def TransformFeatures(self, features):
    p = self.params
    for _ in range(p.repeat_count):
      features = self.subprocessor.FPropDefaultTheta(features)

    return features

  def TransformShapes(self, shapes):
    p = self.params
    for _ in range(p.repeat_count):
      shapes = self.subprocessor.TransformShapes(shapes)

    return shapes

  def TransformDTypes(self, dtypes):
    p = self.params
    for _ in range(p.repeat_count):
      dtypes = self.subprocessor.TransformDTypes(dtypes)

    return dtypes


class RandomApplyPreprocessor(Preprocessor):
  """Randomly apply a preprocessor with a certain probability.

  This preprocessor takes a preprocessor as a subprocessor and applies the
  subprocessor to features with a certain probability.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('prob', 1.0, 'The probability of the subprocessor being executed.')
    p.Define('subprocessor', None, 'Params for an input preprocessor.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if p.subprocessor is None:
      raise ValueError(
          'No subprocessor was specified for RandomApplyPreprocessor.')
    if p.prob < 0 or p.prob > 1 or not isinstance(p.prob, float):
      raise ValueError(
          'prob must be >= 0 and <=1 and float type, prob={}'.format(p.prob))

    self.CreateChild('subprocessor', p.subprocessor)

  def 
TransformFeatures(self, features):\n p = self.params\n choice = tf.random.uniform(\n (), minval=0.0, maxval=1.0, seed=p.random_seed) <= p.prob\n # Features is passed downstream and may be modified, we make deep copies\n # here to use with tf.cond to avoid having tf.cond access updated\n # versions. Note that we need one copy for each branch in case the branches\n # further modify features.\n features_0, features_1 = features.DeepCopy(), features.DeepCopy()\n features = tf.cond(choice,\n lambda: self.subprocessor.TransformFeatures(features_0),\n lambda: features_1)\n return features\n\n def TransformShapes(self, shapes):\n shapes_transformed = self.subprocessor.TransformShapes(shapes)\n\n if not shapes.IsCompatible(shapes_transformed):\n raise ValueError(\n 'NestedMap structures are different between shapes and transformed'\n 'shapes. Original shapes: {}. Transformed shapes: {}'.format(\n shapes, shapes_transformed))\n\n def IsCompatibleWith(a, b):\n return a.is_compatible_with(b)\n\n if not all(\n py_utils.Flatten(\n py_utils.Transform(IsCompatibleWith, shapes, shapes_transformed))):\n raise ValueError(\n 'Shapes after transformation - {} are different from original '\n 'shapes - {}.'.format(shapes_transformed, shapes))\n\n return shapes\n\n def TransformDTypes(self, dtypes):\n transformed_dtypes = self.subprocessor.TransformDTypes(dtypes)\n if transformed_dtypes != dtypes:\n raise ValueError(\n 'DTypes after transformation of preprocessor - {} should be '\n 'the same as {}, but get {}.'.format(self.params.subprocessor, dtypes,\n transformed_dtypes))\n return dtypes\n\n\nclass ConstantPreprocessor(Preprocessor):\n \"\"\"Preprocessor that produces specified constant values in a nested output.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'constants', py_utils.NestedMap(),\n 'Map of key names to numpy arrays of constant values to use. '\n 'Must be a NestedMap or dict convertible to NestedMap.')\n return p\n\n def TransformFeatures(self, features):\n constants = py_utils.NestedMap(self.params.constants)\n features.update(constants.Transform(tf.constant))\n return features\n\n def TransformShapes(self, shapes):\n constants = py_utils.NestedMap(self.params.constants)\n shapes.update(\n constants.Transform(lambda x: tf.TensorShape(np.array(x).shape)))\n return shapes\n\n def TransformDTypes(self, dtypes):\n constants = py_utils.NestedMap(self.params.constants)\n dtypes.update(constants.Transform(lambda x: tf.as_dtype(np.array(x).dtype)))\n return dtypes\n\n\nclass IdentityPreprocessor(Preprocessor):\n \"\"\"Preprocessor that passes all inputs through.\n\n This may be useful for situations where one wants a 'no-op' preprocessor, such\n as being able to randomly choose to do nothing among a set of preprocessor\n choices.\n \"\"\"\n\n def TransformFeatures(self, features):\n return features\n\n def TransformShapes(self, shapes):\n return shapes\n\n def TransformDTypes(self, dtypes):\n return dtypes\n\n\nclass RandomChoicePreprocessor(Preprocessor):\n \"\"\"Randomly applies a preprocessor with specified weights.\n\n The input at features[p.weight_tensor_key] must be a floating point vector\n Tensor whose length matches the number of subprocessors to select among. 
The\n values in that Tensor are interpreted as relative weights.\n\n For example, if p.subprocessors = [preprocessor1, preprocessor2] and the\n weights are [1., 2.], then preprocessor1 will be applied with probability 1/3,\n and preprocessor2 will be applied with probability 2/3.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'subprocessors', [],\n 'Params for preprocessors. Each value should be a tuple of '\n '(Preprocessor.Params(), BaseSchedule.Params()), where the schedule '\n 'defines the weights to use over time.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if not p.subprocessors:\n raise ValueError('No subprocessors were specified.')\n\n subprocessors, schedules = zip(*p.subprocessors)\n\n def _FilterNonSchedules(v):\n return not issubclass(getattr(v, 'cls', False), schedule.BaseSchedule)\n\n invalid_values = [_FilterNonSchedules(s) for s in schedules]\n if any(invalid_values):\n raise TypeError('Not all schedule values were schedules: '\n f'{invalid_values}')\n\n self.CreateChildren('subprocessors', list(subprocessors))\n self.CreateChildren('schedules', list(schedules))\n\n def TransformFeatures(self, features):\n p = self.params\n\n choice_list = []\n weight_list = []\n\n # Pass a unique copy of the input to each branch, in case the\n # subprocessor destructively modifies the features in unexpected ways.\n for subp, sched in zip(self.subprocessors, self.schedules):\n choice_list.append(\n lambda subp=subp: subp.TransformFeatures(features.DeepCopy()))\n weight_list.append(sched.Value())\n\n weight_tensor = tf.stack(weight_list)\n chosen_bin = tf.random.categorical(\n tf.math.log(weight_tensor[tf.newaxis]),\n 1,\n seed=p.random_seed,\n dtype=tf.int32)[0, 0]\n features = tf.switch_case(chosen_bin, branch_fns=choice_list)\n return features\n\n def TransformShapes(self, shapes):\n transformed_shapes = [\n subp.TransformShapes(shapes.DeepCopy()) for subp in self.subprocessors\n ]\n if not all(transformed_shapes[0] == curr for curr in transformed_shapes):\n raise ValueError('Shapes after transformations were not identical: '\n f'{transformed_shapes}')\n return transformed_shapes[0]\n\n def TransformDTypes(self, dtypes):\n transformed_dtypes = [\n subp.TransformDTypes(dtypes.DeepCopy()) for subp in self.subprocessors\n ]\n if not all(transformed_dtypes[0] == curr for curr in transformed_dtypes):\n raise ValueError('DTypes after transformations were not identical: '\n f'{transformed_dtypes}')\n return transformed_dtypes[0]\n\n\nclass SparseSampler(Preprocessor):\n \"\"\"Fused SparseCenterSelector and SparseCellGatherFeatures.\n\n This preprocessor expects features to contain the following keys:\n - lasers.points_xyz of shape [P, 3]\n - lasers.points_feature of shape [P, F]\n\n Adds the following features:\n anchor_centers - [num_centers, 3] - Floating point output containing the\n center (x, y, z) locations for tiling anchor boxes.\n\n cell_center_xyz - [num_centers, 3] - Floating point output containing\n the center (x, y, z) locations for each cell to featurize.\n\n cell_center_padding - [num_centers] - 0/1 padding for each center.\n\n cell_points_xyz - [num_centers, num_neighbors, 3] - Floating point\n output containing the (x, y, z) locations for each point for a given\n center.\n\n cell_feature - [num_centers, num_neighbors, F] - Floating point output\n containing the features for each point for a given center.\n\n cell_points_padding - [num_centers, num_neighbors] - 0/1 padding\n for the points in each 
cell.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('center_selector', 'farthest', 'Method to sample centers. '\n 'Valid options - uniform, farthest.')\n p.Define('neighbor_sampler', 'uniform', 'Method to select neighbors. '\n 'Valid options - uniform, closest.')\n p.Define('num_centers', 16, 'The number of centers to sample.')\n p.Define(\n 'features_preparation_layers', [],\n 'A list of Params for layers to run on the features before '\n 'performing farthest point sampling. For example, one may wish to '\n 'drop points out of frustum for KITTI before selecting centers. '\n 'Note that these layers will not mutate the original features, '\n 'instead, a copy will be made.')\n p.Define(\n 'keep_z_range', (-np.inf, np.inf),\n 'Only points that have z coordinates within this range are kept. '\n 'Approximate ground-removal can be performed by specifying a '\n 'lower-bound on the z-range.')\n p.Define('num_neighbors', 64, 'Sample these many points within the '\n 'neighorhood.')\n p.Define(\n 'max_distance', 1.0, 'Points with L2 distances from a center '\n 'larger than this threshold are not considered to be in the '\n 'neighborhood.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n if p.features_preparation_layers:\n self.CreateChildren('features_preparation_layers',\n p.features_preparation_layers)\n\n def TransformFeatures(self, features):\n p = self.params\n n, m = p.num_centers, p.num_neighbors\n\n prepared_features = features.DeepCopy()\n if p.features_preparation_layers:\n for prep_layer in self.features_preparation_layers:\n prepared_features = prep_layer.FPropDefaultTheta(prepared_features)\n\n points_data = prepared_features.lasers\n points = py_utils.HasShape(points_data.points_xyz, [-1, 3])\n\n if 'points_padding' in points_data:\n points_mask = 1 - points_data.points_padding\n points = tf.boolean_mask(points, points_mask)\n\n # If num_points < num_centers, pad points to have at least num_centers\n # points.\n num_points = tf.shape(points)[0]\n required_num_points = tf.maximum(num_points, p.num_centers)\n zeros = tf.zeros([required_num_points - num_points, 3])\n points = tf.concat([points, zeros], axis=0)\n\n num_seeded_points = points_data.get('num_seeded_points', 0)\n\n neighbor_algorithm = 'auto'\n # Based on benchmarks, the hash solution works better when the number of\n # centers is >= 16 and there are at least 10k points per point cloud.\n if p.num_centers >= 16:\n neighbor_algorithm = 'hash'\n\n centers, center_paddings, indices, indices_paddings = ops.sample_points(\n points=tf.expand_dims(points, 0),\n points_padding=tf.zeros([1, required_num_points], tf.float32),\n num_seeded_points=num_seeded_points,\n center_selector=p.center_selector,\n neighbor_sampler=p.neighbor_sampler,\n neighbor_algorithm=neighbor_algorithm,\n num_centers=p.num_centers,\n center_z_min=p.keep_z_range[0],\n center_z_max=p.keep_z_range[1],\n num_neighbors=p.num_neighbors,\n max_distance=p.max_distance,\n random_seed=p.random_seed if p.random_seed else -1)\n centers = py_utils.HasShape(centers, [1, n])[0, :]\n center_paddings = py_utils.HasShape(center_paddings, [1, n])[0, :]\n indices = py_utils.HasShape(indices, [1, n, m])[0, :]\n indices_paddings = py_utils.HasShape(indices_paddings, [1, n, m])[0, :]\n features.cell_center_padding = center_paddings\n features.cell_center_xyz = py_utils.HasShape(\n tf.gather(points, centers), [n, 3])\n features.anchor_centers = features.cell_center_xyz\n features.cell_points_xyz = 
py_utils.HasShape(\n tf.gather(points, indices), [n, m, 3])\n features.cell_feature = tf.gather(points_data.points_feature, indices)\n features.cell_points_padding = indices_paddings\n return features\n\n def TransformShapes(self, shapes):\n p = self.params\n n, m, f = p.num_centers, p.num_neighbors, shapes.lasers.points_feature[-1]\n shapes.anchor_centers = tf.TensorShape([n, 3])\n shapes.cell_center_padding = tf.TensorShape([n])\n shapes.cell_center_xyz = tf.TensorShape([n, 3])\n shapes.cell_points_xyz = tf.TensorShape([n, m, 3])\n shapes.cell_feature = tf.TensorShape([n, m, f])\n shapes.cell_points_padding = tf.TensorShape([n, m])\n return shapes\n\n def TransformDTypes(self, dtypes):\n dtypes.anchor_centers = tf.float32\n dtypes.cell_center_padding = tf.float32\n dtypes.cell_center_xyz = tf.float32\n dtypes.cell_points_xyz = tf.float32\n dtypes.cell_feature = tf.float32\n dtypes.cell_points_padding = tf.float32\n return dtypes\n"
] | [
[
"numpy.isposinf",
"tensorflow.python.ops.inplace_ops.empty",
"numpy.asarray",
"numpy.prod",
"numpy.array",
"numpy.linspace",
"numpy.isneginf"
]
] |
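The frustum-dropping preprocessor in the lingvo code above converts the cloud to spherical coordinates, picks a random seed point, and removes (or down-samples) points that fall within an angular window of that point beyond a minimum range. Below is a minimal NumPy sketch of that idea, not the lingvo implementation: it covers only the azimuth (phi) filter, ignores angular wrap-around, and the function name and defaults are illustrative.

import numpy as np

def frustum_drop(points_xyz, phi_width=0.3, distance=2.0, keep_prob=0.0, seed=0):
    """Drop points inside a random azimuthal frustum beyond `distance`."""
    rng = np.random.default_rng(seed)
    x, y, _ = points_xyz.T
    r = np.linalg.norm(points_xyz, axis=1)       # range of each point
    phi = np.arctan2(y, x)                       # azimuth of each point
    centre = phi[rng.integers(len(points_xyz))]  # azimuth of a random seed point
    drop = (np.abs(phi - centre) < phi_width / 2.0) & (r > distance)
    if keep_prob > 0:
        # Down-sample inside the frustum: each candidate survives the drop
        # with probability keep_prob, mirroring the keep_prob logic above.
        drop &= rng.uniform(size=len(points_xyz)) > keep_prob
    return points_xyz[~drop]

pts = 5.0 * np.random.default_rng(1).normal(size=(1000, 3))
print(frustum_drop(pts).shape)   # (n_kept, 3), with n_kept < 1000

The real preprocessor additionally builds an equivalent theta filter and combines the two masks with a union or an intersection according to drop_type.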
thegalang/lime | [
"a4cd83e20c838c782728c02f07e21ab01d17f3fa"
] | [
"lime/lime_tabular.py"
] | [
"\"\"\"\nFunctions for explaining classifiers that use tabular data (matrices).\n\"\"\"\nimport collections\nimport copy\nfrom functools import partial\nimport json\nimport warnings\n\nimport numpy as np\nimport sklearn\nimport sklearn.preprocessing\nfrom sklearn.utils import check_random_state\n\nfrom lime.discretize import QuartileDiscretizer\nfrom lime.discretize import DecileDiscretizer\nfrom lime.discretize import EntropyDiscretizer\nfrom lime.discretize import BaseDiscretizer\nfrom . import explanation\nfrom . import lime_base\n\n\nclass TableDomainMapper(explanation.DomainMapper):\n \"\"\"Maps feature ids to names, generates table views, etc\"\"\"\n\n def __init__(self, feature_names, feature_values, scaled_row,\n categorical_features, discretized_feature_names=None):\n \"\"\"Init.\n\n Args:\n feature_names: list of feature names, in order\n feature_values: list of strings with the values of the original row\n scaled_row: scaled row\n categorical_features: list of categorical features ids (ints)\n \"\"\"\n self.exp_feature_names = feature_names\n self.discretized_feature_names = discretized_feature_names\n self.feature_names = feature_names\n self.feature_values = feature_values\n self.scaled_row = scaled_row\n self.all_categorical = len(categorical_features) == len(scaled_row)\n self.categorical_features = categorical_features\n\n def map_exp_ids(self, exp):\n \"\"\"Maps ids to feature names.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n\n Returns:\n list of tuples (feature_name, weight)\n \"\"\"\n names = self.exp_feature_names\n if self.discretized_feature_names is not None:\n names = self.discretized_feature_names\n return [(names[x[0]], x[1]) for x in exp]\n\n def visualize_instance_html(self,\n exp,\n label,\n div_name,\n exp_object_name,\n show_table=True,\n show_all=False):\n \"\"\"Shows the current example in a table format.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n label: label id (integer)\n div_name: name of div object to be used for rendering(in js)\n exp_object_name: name of js explanation object\n show_table: if False, don't show table visualization.\n show_all: if True, show zero-weighted features in the table.\n \"\"\"\n if not show_table:\n return ''\n weights = [0] * len(self.feature_names)\n for x in exp:\n weights[x[0]] = x[1]\n out_list = list(zip(self.exp_feature_names,\n self.feature_values,\n weights))\n if not show_all:\n out_list = [out_list[x[0]] for x in exp]\n ret = u'''\n %s.show_raw_tabular(%s, %d, %s);\n ''' % (exp_object_name, json.dumps(out_list, ensure_ascii=False), label, div_name)\n return ret\n\n\nclass LimeTabularExplainer(object):\n \"\"\"Explains predictions on tabular (i.e. matrix) data.\n For numerical features, perturb them by sampling from a Normal(0,1) and\n doing the inverse operation of mean-centering and scaling, according to the\n means and stds in the training data. 
For categorical features, perturb by\n sampling according to the training distribution, and making a binary\n feature that is 1 when the value is the same as the instance being\n explained.\"\"\"\n\n def __init__(self,\n training_data,\n mode=\"classification\",\n training_labels=None,\n feature_names=None,\n categorical_features=None,\n categorical_names=None,\n kernel_width=None,\n kernel=None,\n verbose=False,\n class_names=None,\n feature_selection='auto',\n discretize_continuous=True,\n discretizer='quartile',\n sample_around_instance=False,\n random_state=None):\n \"\"\"Init function.\n\n Args:\n training_data: numpy 2d array\n mode: \"classification\" or \"regression\"\n training_labels: labels for training data. Not required, but may be\n used by discretizer.\n feature_names: list of names (strings) corresponding to the columns\n in the training data.\n categorical_features: list of indices (ints) corresponding to the\n categorical columns. Everything else will be considered\n continuous. Values in these columns MUST be integers.\n categorical_names: map from int to list of names, where\n categorical_names[x][y] represents the name of the yth value of\n column x.\n kernel_width: kernel width for the exponential kernel.\n If None, defaults to sqrt (number of columns) * 0.75\n kernel: similarity kernel that takes euclidean distances and kernel\n width as input and outputs weights in (0,1). If None, defaults to\n an exponential kernel.\n verbose: if true, print local prediction values from linear model\n class_names: list of class names, ordered according to whatever the\n classifier is using. If not present, class names will be '0',\n '1', ...\n feature_selection: feature selection method. can be\n 'forward_selection', 'lasso_path', 'none' or 'auto'.\n See function 'explain_instance_with_data' in lime_base.py for\n details on what each of the options does.\n discretize_continuous: if True, all non-categorical features will\n be discretized into quartiles.\n discretizer: only matters if discretize_continuous is True. Options\n are 'quartile', 'decile', 'entropy' or a BaseDiscretizer\n instance.\n sample_around_instance: if True, will sample continuous features\n in perturbed samples from a normal centered at the instance\n being explained. Otherwise, the normal is centered on the mean\n of the feature data.\n random_state: an integer or numpy.RandomState that will be used to\n generate random numbers. 
If None, the random state will be\n initialized using the internal numpy seed.\n \"\"\"\n self.random_state = check_random_state(random_state)\n self.mode = mode\n self.categorical_names = categorical_names or {}\n self.sample_around_instance = sample_around_instance\n\n if categorical_features is None:\n categorical_features = []\n if feature_names is None:\n feature_names = [str(i) for i in range(training_data.shape[1])]\n\n self.categorical_features = list(categorical_features)\n self.feature_names = list(feature_names)\n\n self.discretizer = None\n if discretize_continuous:\n if discretizer == 'quartile':\n self.discretizer = QuartileDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels)\n elif discretizer == 'decile':\n self.discretizer = DecileDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels)\n elif discretizer == 'entropy':\n self.discretizer = EntropyDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels)\n elif isinstance(discretizer, BaseDiscretizer):\n self.discretizer = discretizer\n else:\n raise ValueError('''Discretizer must be 'quartile',''' +\n ''' 'decile', 'entropy' or a''' +\n ''' BaseDiscretizer instance''')\n self.categorical_features = list(range(training_data.shape[1]))\n discretized_training_data = self.discretizer.discretize(\n training_data)\n\n if kernel_width is None:\n kernel_width = np.sqrt(training_data.shape[1]) * .75\n kernel_width = float(kernel_width)\n\n if kernel is None:\n def kernel(d, kernel_width):\n return np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))\n\n kernel_fn = partial(kernel, kernel_width=kernel_width)\n\n self.feature_selection = feature_selection\n self.base = lime_base.LimeBase(kernel_fn, verbose, random_state=self.random_state)\n self.scaler = None\n self.class_names = class_names\n self.scaler = sklearn.preprocessing.StandardScaler(with_mean=False)\n self.scaler.fit(training_data)\n self.feature_values = {}\n self.feature_frequencies = {}\n\n for feature in self.categorical_features:\n if self.discretizer is not None:\n column = discretized_training_data[:, feature]\n else:\n column = training_data[:, feature]\n\n feature_count = collections.Counter(column)\n values, frequencies = map(list, zip(*(sorted(feature_count.items()))))\n\n self.feature_values[feature] = values\n self.feature_frequencies[feature] = (np.array(frequencies) /\n float(sum(frequencies)))\n self.scaler.mean_[feature] = 0\n self.scaler.scale_[feature] = 1\n\n @staticmethod\n def convert_and_round(values):\n return ['%.2f' % v for v in values]\n\n def explain_instance(self,\n data_row,\n predict_fn,\n labels=(1,),\n top_labels=None,\n num_features=10,\n num_samples=5000,\n distance_metric='euclidean',\n model_regressor=None):\n \"\"\"Generates explanations for a prediction.\n\n First, we generate neighborhood data by randomly perturbing features\n from the instance (see __data_inverse). We then learn locally weighted\n linear models on this neighborhood data to explain each of the classes\n in an interpretable way (see lime_base.py).\n\n Args:\n data_row: 1d numpy array, corresponding to a row\n predict_fn: prediction function. For classifiers, this should be a\n function that takes a numpy array and outputs prediction\n probabilities. For regressors, this takes a numpy array and\n returns the predictions. For ScikitClassifiers, this is\n `classifier.predict_proba()`. For ScikitRegressors, this\n is `regressor.predict()`. 
The prediction function needs to work\n on multiple feature vectors (the vectors randomly perturbed\n from the data_row).\n labels: iterable with labels to be explained.\n top_labels: if not None, ignore labels and produce explanations for\n the K labels with highest prediction probabilities, where K is\n this parameter.\n num_features: maximum number of features present in explanation\n num_samples: size of the neighborhood to learn the linear model\n distance_metric: the distance metric to use for weights.\n model_regressor: sklearn regressor to use in explanation. Defaults\n to Ridge regression in LimeBase. Must have model_regressor.coef_\n and 'sample_weight' as a parameter to model_regressor.fit()\n\n Returns:\n An Explanation object (see explanation.py) with the corresponding\n explanations.\n \"\"\"\n data, inverse = self.__data_inverse(data_row, num_samples)\n scaled_data = (data - self.scaler.mean_) / self.scaler.scale_\n\n distances = sklearn.metrics.pairwise_distances(\n scaled_data,\n scaled_data[0].reshape(1, -1),\n metric=distance_metric\n ).ravel()\n\n yss = predict_fn(inverse)\n\n # for classification, the model needs to provide a list of tuples - classes\n # along with prediction probabilities\n if self.mode == \"classification\":\n if len(yss.shape) == 1:\n raise NotImplementedError(\"LIME does not currently support \"\n \"classifier models without probability \"\n \"scores. If this conflicts with your \"\n \"use case, please let us know: \"\n \"https://github.com/datascienceinc/lime/issues/16\")\n elif len(yss.shape) == 2:\n if self.class_names is None:\n self.class_names = [str(x) for x in range(yss[0].shape[0])]\n else:\n self.class_names = list(self.class_names)\n if not np.allclose(yss.sum(axis=1), 1.0):\n warnings.warn(\"\"\"\n Prediction probabilties do not sum to 1, and\n thus does not constitute a probability space.\n Check that you classifier outputs probabilities\n (Not log probabilities, or actual class predictions).\n \"\"\")\n else:\n raise ValueError(\"Your model outputs \"\n \"arrays with {} dimensions\".format(len(yss.shape)))\n\n # for regression, the output should be a one-dimensional array of predictions\n else:\n try:\n assert isinstance(yss, np.ndarray) and len(yss.shape) == 1\n except AssertionError:\n raise ValueError(\"Your model needs to output single-dimensional \\\n numpyarrays, not arrays of {} dimensions\".format(yss.shape))\n\n predicted_value = yss[0]\n min_y = min(yss)\n max_y = max(yss)\n\n # add a dimension to be compatible with downstream machinery\n yss = yss[:, np.newaxis]\n\n feature_names = copy.deepcopy(self.feature_names)\n if feature_names is None:\n feature_names = [str(x) for x in range(data_row.shape[0])]\n\n values = self.convert_and_round(data_row)\n\n for i in self.categorical_features:\n if self.discretizer is not None and i in self.discretizer.lambdas:\n continue\n name = int(data_row[i])\n if i in self.categorical_names:\n name = self.categorical_names[i][name]\n feature_names[i] = '%s=%s' % (feature_names[i], name)\n values[i] = 'True'\n categorical_features = self.categorical_features\n\n discretized_feature_names = None\n if self.discretizer is not None:\n categorical_features = range(data.shape[1])\n discretized_instance = self.discretizer.discretize(data_row)\n discretized_feature_names = copy.deepcopy(feature_names)\n for f in self.discretizer.names:\n discretized_feature_names[f] = self.discretizer.names[f][int(\n discretized_instance[f])]\n\n domain_mapper = TableDomainMapper(feature_names,\n values,\n 
scaled_data[0],\n categorical_features=categorical_features,\n discretized_feature_names=discretized_feature_names)\n ret_exp = explanation.Explanation(domain_mapper,\n mode=self.mode,\n class_names=self.class_names)\n ret_exp.scaled_data = scaled_data\n if self.mode == \"classification\":\n ret_exp.predict_proba = yss[0]\n if top_labels:\n labels = np.argsort(yss[0])[-top_labels:]\n ret_exp.top_labels = list(labels)\n ret_exp.top_labels.reverse()\n else:\n ret_exp.predicted_value = predicted_value\n ret_exp.min_value = min_y\n ret_exp.max_value = max_y\n labels = [0]\n\n for label in labels:\n (ret_exp.intercept[label],\n ret_exp.local_exp[label],\n ret_exp.score, ret_exp.local_pred) = self.base.explain_instance_with_data(\n scaled_data,\n yss,\n distances,\n label,\n num_features,\n model_regressor=model_regressor,\n feature_selection=self.feature_selection)\n\n if self.mode == \"regression\":\n ret_exp.intercept[1] = ret_exp.intercept[0]\n ret_exp.local_exp[1] = [x for x in ret_exp.local_exp[0]]\n ret_exp.local_exp[0] = [(i, -1 * j) for i, j in ret_exp.local_exp[1]]\n\n return ret_exp\n\n def __data_inverse(self,\n data_row,\n num_samples):\n \"\"\"Generates a neighborhood around a prediction.\n\n For numerical features, perturb them by sampling from a Normal(0,1) and\n doing the inverse operation of mean-centering and scaling, according to\n the means and stds in the training data. For categorical features,\n perturb by sampling according to the training distribution, and making\n a binary feature that is 1 when the value is the same as the instance\n being explained.\n\n Args:\n data_row: 1d numpy array, corresponding to a row\n num_samples: size of the neighborhood to learn the linear model\n\n Returns:\n A tuple (data, inverse), where:\n data: dense num_samples * K matrix, where categorical features\n are encoded with either 0 (not equal to the corresponding value\n in data_row) or 1. The first row is the original instance.\n inverse: same as data, except the categorical features are not\n binary, but categorical (as the original data)\n \"\"\"\n data = np.zeros((num_samples, data_row.shape[0]))\n categorical_features = range(data_row.shape[0])\n if self.discretizer is None:\n data = self.random_state.normal(\n 0, 1, num_samples * data_row.shape[0]).reshape(\n num_samples, data_row.shape[0])\n if self.sample_around_instance:\n data = data * self.scaler.scale_ + data_row\n else:\n data = data * self.scaler.scale_ + self.scaler.mean_\n categorical_features = self.categorical_features\n first_row = data_row\n else:\n first_row = self.discretizer.discretize(data_row)\n data[0] = data_row.copy()\n inverse = data.copy()\n for column in categorical_features:\n values = self.feature_values[column]\n freqs = self.feature_frequencies[column]\n inverse_column = self.random_state.choice(values, size=num_samples,\n replace=True, p=freqs)\n binary_column = np.array([1 if x == first_row[column]\n else 0 for x in inverse_column])\n binary_column[0] = 1\n inverse_column[0] = data[0, column]\n data[:, column] = binary_column\n inverse[:, column] = inverse_column\n if self.discretizer is not None:\n inverse[1:] = self.discretizer.undiscretize(inverse[1:])\n inverse[0] = data_row\n return data, inverse\n\n\nclass RecurrentTabularExplainer(LimeTabularExplainer):\n \"\"\"\n An explainer for keras-style recurrent neural networks, where the\n input shape is (n_samples, n_timesteps, n_features). 
This class\n just extends the LimeTabularExplainer class and reshapes the training\n data and feature names such that they become something like\n\n (val1_t1, val1_t2, val1_t3, ..., val2_t1, ..., valn_tn)\n\n Each of the methods that take data reshape it appropriately,\n so you can pass in the training/testing data exactly as you\n would to the recurrent neural network.\n\n \"\"\"\n\n def __init__(self, training_data, mode=\"classification\",\n training_labels=None, feature_names=None,\n categorical_features=None, categorical_names=None,\n kernel_width=None, kernel=None, verbose=False, class_names=None,\n feature_selection='auto', discretize_continuous=True,\n discretizer='quartile', random_state=None):\n \"\"\"\n Args:\n training_data: numpy 3d array with shape\n (n_samples, n_timesteps, n_features)\n mode: \"classification\" or \"regression\"\n training_labels: labels for training data. Not required, but may be\n used by discretizer.\n feature_names: list of names (strings) corresponding to the columns\n in the training data.\n categorical_features: list of indices (ints) corresponding to the\n categorical columns. Everything else will be considered\n continuous. Values in these columns MUST be integers.\n categorical_names: map from int to list of names, where\n categorical_names[x][y] represents the name of the yth value of\n column x.\n kernel_width: kernel width for the exponential kernel.\n If None, defaults to sqrt(number of columns) * 0.75\n kernel: similarity kernel that takes euclidean distances and kernel\n width as input and outputs weights in (0,1). If None, defaults to\n an exponential kernel.\n verbose: if true, print local prediction values from linear model\n class_names: list of class names, ordered according to whatever the\n classifier is using. If not present, class names will be '0',\n '1', ...\n feature_selection: feature selection method. can be\n 'forward_selection', 'lasso_path', 'none' or 'auto'.\n See function 'explain_instance_with_data' in lime_base.py for\n details on what each of the options does.\n discretize_continuous: if True, all non-categorical features will\n be discretized into quartiles.\n discretizer: only matters if discretize_continuous is True. Options\n are 'quartile', 'decile', 'entropy' or a BaseDiscretizer\n instance.\n random_state: an integer or numpy.RandomState that will be used to\n generate random numbers. 
If None, the random state will be\n initialized using the internal numpy seed.\n \"\"\"\n\n # Reshape X\n n_samples, n_timesteps, n_features = training_data.shape\n training_data = np.transpose(training_data, axes=(0, 2, 1)).reshape(\n n_samples, n_timesteps * n_features)\n self.n_timesteps = n_timesteps\n self.n_features = n_features\n\n # Update the feature names\n feature_names = ['{}_t-{}'.format(n, n_timesteps - (i + 1))\n for n in feature_names for i in range(n_timesteps)]\n\n # Send off the the super class to do its magic.\n super(RecurrentTabularExplainer, self).__init__(\n training_data,\n mode=mode,\n training_labels=training_labels,\n feature_names=feature_names,\n categorical_features=categorical_features,\n categorical_names=categorical_names,\n kernel_width=kernel_width,\n kernel=kernel,\n verbose=verbose,\n class_names=class_names,\n feature_selection=feature_selection,\n discretize_continuous=discretize_continuous,\n discretizer=discretizer,\n random_state=random_state)\n\n def _make_predict_proba(self, func):\n \"\"\"\n The predict_proba method will expect 3d arrays, but we are reshaping\n them to 2D so that LIME works correctly. This wraps the function\n you give in explain_instance to first reshape the data to have\n the shape the the keras-style network expects.\n \"\"\"\n\n def predict_proba(X):\n n_samples = X.shape[0]\n new_shape = (n_samples, self.n_features, self.n_timesteps)\n X = np.transpose(X.reshape(new_shape), axes=(0, 2, 1))\n return func(X)\n\n return predict_proba\n\n def explain_instance(self, data_row, classifier_fn, labels=(1,),\n top_labels=None, num_features=10, num_samples=5000,\n distance_metric='euclidean', model_regressor=None):\n \"\"\"Generates explanations for a prediction.\n\n First, we generate neighborhood data by randomly perturbing features\n from the instance (see __data_inverse). We then learn locally weighted\n linear models on this neighborhood data to explain each of the classes\n in an interpretable way (see lime_base.py).\n\n Args:\n data_row: 2d numpy array, corresponding to a row\n classifier_fn: classifier prediction probability function, which\n takes a numpy array and outputs prediction probabilities. For\n ScikitClassifiers , this is classifier.predict_proba.\n labels: iterable with labels to be explained.\n top_labels: if not None, ignore labels and produce explanations for\n the K labels with highest prediction probabilities, where K is\n this parameter.\n num_features: maximum number of features present in explanation\n num_samples: size of the neighborhood to learn the linear model\n distance_metric: the distance metric to use for weights.\n model_regressor: sklearn regressor to use in explanation. Defaults\n to Ridge regression in LimeBase. Must have\n model_regressor.coef_ and 'sample_weight' as a parameter\n to model_regressor.fit()\n\n Returns:\n An Explanation object (see explanation.py) with the corresponding\n explanations.\n \"\"\"\n\n # Flatten input so that the normal explainer can handle it\n data_row = data_row.T.reshape(self.n_timesteps * self.n_features)\n\n # Wrap the classifier to reshape input\n classifier_fn = self._make_predict_proba(classifier_fn)\n return super(RecurrentTabularExplainer, self).explain_instance(\n data_row, classifier_fn,\n labels=labels,\n top_labels=top_labels,\n num_features=num_features,\n num_samples=num_samples,\n distance_metric=distance_metric,\n model_regressor=model_regressor)\n"
] | [
[
"numpy.sqrt",
"sklearn.utils.check_random_state",
"numpy.transpose",
"numpy.zeros",
"numpy.argsort",
"numpy.exp",
"sklearn.preprocessing.StandardScaler",
"numpy.array"
]
] |
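The heart of LimeTabularExplainer above is the neighbourhood generation in __data_inverse plus the exponential kernel weighting in explain_instance. For purely continuous features with no discretizer, both reduce to a few lines of NumPy/scikit-learn. The sketch below follows the same formulas as the source (it is a stripped-down illustration, not a substitute for the library; variable names are mine):

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import pairwise_distances

rng = np.random.RandomState(0)
training_data = rng.rand(200, 4)
scaler = StandardScaler().fit(training_data)     # stores mean_ and scale_

data_row = training_data[0]
num_samples = 5000

# Perturb: Normal(0, 1) noise, mapped back through the training mean/std
# (the continuous-feature branch of __data_inverse above).
noise = rng.normal(0, 1, size=(num_samples, training_data.shape[1]))
inverse = noise * scaler.scale_ + scaler.mean_
inverse[0] = data_row                            # first row is the instance itself

# Weight each neighbour with the default exponential kernel.
scaled = (inverse - scaler.mean_) / scaler.scale_
d = pairwise_distances(scaled, scaled[0].reshape(1, -1)).ravel()
kernel_width = np.sqrt(training_data.shape[1]) * 0.75
weights = np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))

A weighted linear model (Ridge by default in LimeBase) fit on scaled, the black-box predictions for inverse, and these weights then yields the local explanation.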
OllieBoyne/dog-dynamics | [
"c472f984cb04e6dea932be6a42f4daaf174fb44c"
] | [
"dynamics/dynamics.py"
] | [
"\"\"\"DEFINES THE INVERSEDYNAMICS SOLVER, A Solver for solving the joint based model of a dog.\"\"\"\n\nfrom scipy import optimize, signal\n\nfrom data.data_loader import C3DData, load_force_plate_data, ForcePlateData, SMALData, get_delay_between, DataSources, \\\n\tpath_join\nfrom vis.utils import *\nfrom vis import visualisations\nfrom dynamics.footfall_detector import FootfallDetector\nfrom tqdm import tqdm\n\n# pure constants (no optimisation needed)\ng = 9.81\nfreq_forceplate = 100 # Hz\nfoot_joint_labels = [\"front left\", \"front right\", \"rear left\", \"rear right\"]\nfoot_joint_indices = [0, 9, 23, 20] # for set 2 3r3\n\n\nclass Model:\n\t\"\"\"ID Model, with all parameters derived/optimised\"\"\"\n\n\tdef __init__(self):\n\t\t# CONSTANTS\n\t\tself.paws = {}\n\t\tself.bone_density = 1950 # Estimate - needs refining! From paper: Development of a neuromusculoskeletal computer model in a chondrodystrophic dog.\n\t\tself.muscle_density = 1060 # From above\n\n\t\t# params to optimise\n\t\tself.bone_length_definitions = {\n\t\t\t\"normal\": lambda l: dict(inner_radius=0.01, outer_radius=0.05, displacement=0),\n\t\t\t\"body\": lambda l: dict(inner_radius=l / 20, outer_radius=l / 7, displacement=l / 4 - l / 20), }\n\n\t\t# Paw parameters. All scaled to be in standard form - exponent in separate dict.\n\t\tself.paw_params_normalised = {\n\t\t\t\"L0_front\": 6.9, # 6.9 # in .1mm\n\t\t\t\"L0_rear\": 6.9, # in .1mm\n\t\t\t\"k_front\": 3.42 * .18, # in kN/m\n\t\t\t\"k_rear\": 2.0 * .21, # in kN/m\n\t\t\t\"c_front\": 20,\n\t\t\t\"c_rear\": 20,\n\t\t\t\"k_rear_prop\": 0.85, # k = k_rear * m **.85\n\t\t\t\"frame_delay\": 0 # Used for analysis of paw treadmill forces. Not used for normal ID solver\n\t\t}\n\t\tself.paw_exponents = {\n\t\t\t\"L0_front\": -4,\n\t\t\t\"L0_rear\": -4,\n\t\t\t\"k_front\": 3,\n\t\t\t\"k_rear\": 3,\n\t\t\t\"c_front\": 0,\n\t\t\t\"c_rear\": 0,\n\t\t\t\"k_rear_prop\": 0,\n\t\t\t\"frame_delay\": 0\n\t\t}\n\n\t\tself.calc_paw_params()\n\n\t\tself.freq_par_data = 200\n\n\t\t# weightings used in dynamics calculations\n\t\tself.equation_weighting = {\n\t\t\t\"Inertial\": 2,\n\t\t\t\"Rotational\": 2,\n\t\t\t\"Leg spring\": 0.5,\n\t\t\t\"Paw spring\": 1,\n\t\t}\n\n\tdef calc_paw_params(self):\n\t\t\"\"\"Calculates paw parameters (separate function for optimisation purposes)\"\"\"\n\t\tfor param, val in self.paw_params_normalised.items():\n\t\t\tself.paws[param] = val * 10 ** (self.paw_exponents[param])\n\n\tdef edit_paw_param(self, param, val):\n\t\t\"\"\"Edit paw parameter (separate for optimisation purposes)\"\"\"\n\t\tself.paw_params_normalised[param] = val\n\t\tself.calc_paw_params()\n\n\nmodel = Model()\n\n\ndef time_deriv(X, dt):\n\t\"\"\"Finds the time derivative of a given series of data.\n\tAlways treats the first dimension as time - works for any number of dimensions (n_frames, M, N, O, ...).\n\tFor all except first and last val, calcs difference over 2 timesteps\"\"\"\n\n\tdiff = np.zeros_like(X)\n\n\tdiff[0] = X[1] - X[0]\n\tdiff[1:-1] = (X[2:] - X[:-2]) / 2\n\tdiff[-1] = X[-1] - X[-2]\n\n\treturn diff * 1 / dt\n\n\ndef nth_time_deriv(X, dt, n=2):\n\t\"\"\"Recursively get the nth time derivative\"\"\"\n\n\tif n == 1:\n\t\treturn time_deriv(X, dt)\n\telse:\n\t\treturn time_deriv(nth_time_deriv(X, dt, n=n - 1), dt)\n\n\ndef get_principal_axes(vector=Vector(1, 0, 0), cardinal=np.identity(3)):\n\t\"\"\"Given a vector, devise a basis of principle axis with any two perpendicular vectors (for application of an\n\taxisymmetric object - cylinder) \"\"\"\n\n\ti, j, k = 
cardinal\n\tK = vector.unit()\n\t# Now find any two perp vectors to K\n\tif not K.is_parallel(i):\n\t\tI = K.cross(i).unit()\n\t\tJ = K.cross(I).unit()\n\telse:\n\t\tI = K.cross(j).unit()\n\t\tJ = K.cross(I).unit()\n\n\treturn np.array([I, J, K])\n\n\ndef I_cylinder(density, length, radius):\n\tmass = density * np.pi * (radius ** 2) * length\n\tIxx, Izz = (length ** 2) / 12 + (radius ** 2) / 4, radius ** 2 / 2\n\treturn mass * np.diag([Ixx, Ixx, Izz])\n\n\nclass DoubleCylinder:\n\t\"\"\"An object comprised of a cylinder of given length between two end points, of radius inner_radius and density bone_density,\n\tand an outer cylinder that does NOT share the same central axis, of radius outer_radius, displaced by a distance <displacement> normally from the centerline.\n\n\tCylinder is defined with the centerline vertical (z direction), and the displacement always in the normal closest to the z direction downwards.\n\n\tFor InverseDynamics calculations, this object will have a start and end index, which correspond to the joint indices in which the end point data is held.\n\t\"\"\"\n\n\tdef __init__(self, start, end, length, inner_radius, outer_radius, displacement, freq=50.0, name=\"\"):\n\n\t\tself.name = name\n\t\tself.freq = freq # Frequency, in Hz\n\n\t\tself.start = start\n\t\tself.end = end\n\n\t\tself.length = length\n\t\tself.displacement = displacement\n\n\t\tif outer_radius is None: outer_radius = inner_radius\n\n\t\tself.inner_mass = model.bone_density * np.pi * inner_radius ** 2 * self.length\n\t\tself.outer_mass = model.muscle_density * np.pi * self.length * (outer_radius ** 2 - inner_radius ** 2)\n\t\tself.mass = self.inner_mass + self.outer_mass\n\n\t\tI_bone = I_cylinder(model.bone_density, length, inner_radius)\n\t\tI_muscle = I_cylinder(model.muscle_density, length, outer_radius) - I_cylinder(model.muscle_density, length,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t inner_radius)\n\n\t\t# By parallel axis theorem, add component of I due to outer radius being displaced from the centerline axis\n\t\tI_axis_displacement = np.zeros((3, 3))\n\t\tI_axis_displacement[0, 0] = self.outer_mass * displacement ** 2\n\n\t\tself.I = I_bone + I_muscle + I_axis_displacement # Inertia tensor in a reference frame in which the bone is lengthwise facing upwards\n\n\tdef get_kinematics(self, data):\n\t\t\"\"\"Given a numpy array of time, data, of shape (n_frames, 2, 3),\n\t\tgiving the position data of both ends of the cylinder over time, compute the kinematics of the cylinder\"\"\"\n\n\t\tX = self.X = np.array(data) # positions\n\t\tV = self.V = time_deriv(X, 1 / self.freq) # velocities\n\t\tA = self.A = time_deriv(V, 1 / self.freq) # accelerations\n\n\t\tself.XG = np.mean(X, axis=1) # average over X\n\t\tself.VG = np.mean(V, axis=1) # average over V\n\t\tself.AG = np.mean(A, axis=1) # average over A\n\n\t\t# Rotational\n\t\tR = self.R = [Vector(*x[1]) - Vector(*x[0]) for x in X] # Vector from bone start to end in each frame\n\t\tlocal_axes = [get_principal_axes(r) for r in R] # Get principal axes for each frame\n\n\t\t# theta_g = (n_frame, 3) of angular rotation about i, j, k for each frame\n\t\t# angular rotation about each axis is defined as 0 for the next vector in the cycle\n\t\t# i.e. 
angular rotation about i = 0 for a vector parallel to j\n\t\tzero_angles = [[0, 1, 0], [0, 0, 1], [1, 0, 0]] # definition of 'zero angle' vector for i, j, k\n\t\ttheta_g = []\n\n\t\t# Compute theta_g in local axes first, where K is the unit vector\n\t\tfor n_frame in range(len(X) - 1):\n\t\t\tlocal_ax = local_axes[n_frame]\n\t\t\t# representation as a a single rotation theta about an axis e (https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula)\n\t\t\ta = R[n_frame] # rotation from one frame...\n\t\t\tb = R[n_frame + 1] # ...to the next\n\t\t\tif np.array_equal(a, b):\n\t\t\t\ttheta_g += [[0, 0, 0]] # If no rotation, return 0\n\t\t\telse:\n\t\t\t\taxis = np.cross(a, b) / (np.linalg.norm(np.cross(a, b))) # unit vector of omega\n\t\t\t\twith np.errstate(invalid='raise'):\n\t\t\t\t\ttry:\n\t\t\t\t\t\talignment = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))\n\t\t\t\t\t\talignment = np.clip(alignment, a_min=-1,\n\t\t\t\t\t\t\t\t\t\t\ta_max=1) # clip between -1 and 1 to deal with rounding errors\n\t\t\t\t\t\tangle = np.arccos(alignment) # magnitude of theta\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint((np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))))\n\t\t\t\t\t\traise ValueError(\"INVALID ANGLE\", a, b)\n\n\t\t\t\ttheta_g += [axis * angle]\n\n\t\ttheta_g = np.array(theta_g)\n\t\tself.theta_g = signal.savgol_filter(theta_g, window_length=19, polyorder=2, axis=0)\n\t\tself.omega_g = time_deriv(self.theta_g, dt=1 / self.freq)\n\t\tself.alpha_g = time_deriv(self.omega_g, dt=1 / self.freq) # angular acceleration\n\n\t\tself.I_fixed = [la.T @ self.I @ la for la in local_axes] # compute I in fixed reference frame at each frame\n\n\tdef get_dynamics(self):\n\t\t\"\"\"Compute dynamics (F_net, Torque_net) at each frame\"\"\"\n\t\tself.F_net = [self.mass * a_g for a_g in self.AG]\n\t\tself.tau_net = [I_f @ alpha for (I_f, alpha) in zip(self.I_fixed, self.alpha_g)]\n\n\nclass Body(DoubleCylinder):\n\t\"\"\"A unique case of double cylinder, where the (multiple) joints connect at the cylindrical surface at either end.\n\tThese joint attachments are defined by an angle from the i direction normal to the centerline at initialisation.\n\n\tDynamics for the body then must be calculated using a separate set of equations. Define the body such that all\n\tjoints bones go into it rather than out of it (i.e. all input forces are positive on the body) \"\"\"\n\n\tdef __init__(self, start_joints, end_joints, all_joint_positions, **cylinder_kwaargs):\n\t\t\"\"\"From the indices given by start_joints and end_joints, identify a cylinder shape that best fits these\n\t\tpoints on either side, and create that as the cylinder. 
\"\"\"\n\n\t\tself.start_joints = start_joints\n\t\tself.end_joints = end_joints\n\n\t\tstart_pos = Vector(*np.mean(all_joint_positions[40, start_joints], axis=0))\n\t\tend_pos = Vector(*np.mean(all_joint_positions[40, end_joints], axis=0))\n\n\t\tlength = start_pos > end_pos\n\n\t\tsuper().__init__(start=None, end=None, length=length, **model.bone_length_definitions[\"body\"](length),\n\t\t\t\t\t\t **cylinder_kwaargs)\n\n\tdef get_centre_of_gravity(self, start: 'Vector', end: 'Vector'):\n\t\t\"\"\"Calculates the centre of gravity based on the displacement from the centerline.\"\"\"\n\t\tcentreline_g = 0.5 * (start + end)\n\n\t\t# to find a normal that is closest to z, find N possible equispaced normals, and see which one has the greatest .k product\n\t\tnormal = (start - end).find_normal()\n\t\tN = 20 # number of normals to consider\n\t\tall_normals = [normal.rotate_about((start - end).unit(), angle=(n * 2 * np.pi / N)) for n in range(N)]\n\n\t\tidx = np.argmax([v.dot(Vector(0, 0, -1)) for v in all_normals])\n\t\tchosen_normal = all_normals[idx] # choose most downwards normal\n\n\t\treturn centreline_g + self.displacement * chosen_normal\n\n\tdef get_kinematics(self, data):\n\t\t\"\"\"For body, data is of shape (n_frames, 2, 2, 3), where it is split by rear and front.\n\t\tSo average across rear and front to get behaviour of centerline, and then run normal get_kinematics\"\"\"\n\t\tsuper().get_kinematics(np.mean(data, axis=2))\n\n\ndef weighted_bound_least_squares(A, b, weights=None, bounds=None, **kwargs):\n\t\"\"\"Completes a least squares solve of the equation A x = b, to solve N unknowns from M equations\n\twhere A is an M x N matrix, x is an N x 1 vector, and b is an M x 1 vector.\n\tApplies weightings to each row to favour certain datapoints. weights is an M x 1 vector.\n\n\tApplies bounds where bounds is an M x 2 array. each tuple in the array gives the LB and UB for the given equation\"\"\"\n\n\tif weights is None: weights = np.ones_like(b) # If no weight given, equal weight to all\n\tw = np.array(weights)\n\n\tweighted_A, weighted_b = np.array(A) * w[:, np.newaxis], np.array(b) * w # Apply weights to A, b\n\n\ttry:\n\t\tsolve = optimize.lsq_linear(weighted_A, weighted_b, bounds=list(zip(*bounds)), tol=1e-2)\n\t\treturn solve[\"x\"]\n\n\texcept np.linalg.LinAlgError as e:\n\t\tout = f\"SVD did not converge in Lin Least Sq. 
Printing params: {A}, {b}\"\n\t\traise ArithmeticError(out)\n\n\nclass InverseDynamicsSolver:\n\t\"\"\"Through scipy optimisation, Skeleton finds a set of force data that corresponds to the correct kinematic data.\n\tTakes a skeleton, and the relevant bones and joints, and solves the set of forces that correspond to correct kinematics.\"\"\"\n\n\tdef __init__(self, joint_data, target_bones, body_joints, no_torque_joints=None, no_reaction_joints=None,\n\t\tfoot_joints=None, leg_spring_joints=None, model=Model(),\n\t\tfreq=50.0, name=\"output\", is_mocap=True):\n\n\t\tfor var in [foot_joints, leg_spring_joints, no_reaction_joints, no_torque_joints]:\n\t\t\tif var is None:\n\t\t\t\tvar = []\n\n\t\tself.name = name\n\t\tself.freq = freq\n\t\tself.n_frames, self.n_joints, _ = joint_data.shape\n\n\t\tself.model = model\n\t\tself.is_mocap = is_mocap\n\n\t\t# Preprocess joint data - basic smoothing\n\t\tif is_mocap:\n\t\t\twindow_length = self.freq // 2\n\t\telse:\n\t\t\twindow_length = 0.75 * self.freq\n\n\t\tif window_length % 2 == 0: window_length -= 1\n\n\t\tself.T = self.n_frames / self.freq\n\n\t\tself.smooth = lambda X, p=5: signal.savgol_filter(X, window_length=int(window_length), polyorder=p, axis=0)\n\n\t\tp = 5 if self.is_mocap else 2\n\n\t\tself.unsmoothed_data = joint_data # save unsmoothed data for other uses\n\t\tself.joint_pos = self.smooth(joint_data, p=p)\n\t\tself.joint_vel = time_deriv(self.joint_pos, 1 / freq)\n\t\tself.joint_accel = time_deriv(self.smooth(self.joint_vel), 1 / freq)\n\n\t\tself.foot_joints = foot_joints\n\t\tself.body_joints = body_joints\n\t\tself.get_foot_joint_from_index = {} # Identify which foot from the index\n\t\tfor fj in self.foot_joints:\n\t\t\tfor bone, (j1, j2) in target_bones.items():\n\t\t\t\tif fj in [j1, j2]:\n\t\t\t\t\tself.get_foot_joint_from_index[fj] = bone\n\n\t\tself.no_torque_joints = no_torque_joints\n\t\tself.no_reaction_joints = no_reaction_joints\n\n\t\tself.target_bones_dict = target_bones # for use in plotting\n\t\tself.target_bones = []\n\n\t\tself.total_mass = 0\n\n\t\tfor bone, (joint1, joint2) in target_bones.items():\n\t\t\t# Calculate length using the initial positions of jointA and B.\n\t\t\t# Smoothing functions can cause the issues for the first few frames, so take avg of later frames\n\n\t\t\tframes = [50, 51, 52, 53, 54, 55, 56]\n\t\t\tn_averaging = len(frames)\n\t\t\tlength = 0\n\n\t\t\tfor frame in frames:\n\t\t\t\tposA = Vector(*self.joint_pos[frame, joint1])\n\t\t\t\tposB = Vector(*self.joint_pos[frame, joint2])\n\n\t\t\t\tif posA.length() == 0 or posB.length() == 0:\n\t\t\t\t\tn_averaging -= 1\n\t\t\t\telse:\n\t\t\t\t\tlength += posA > posB\n\n\t\t\tlength = length / n_averaging # avg of all the frames data taken from\n\n\t\t\tif length == 0:\n\t\t\t\tprint(f\"Warning: Error in calculating length of '{bone}'\")\n\t\t\t\tlength = 0.01\n\n\t\t\tb = DoubleCylinder(start=joint1, end=joint2, length=length, name=bone, freq=freq,\n\t\t\t\t\t\t\t **self.model.bone_length_definitions[\"normal\"](length))\n\n\t\t\tself.target_bones.append(b) # add bone to list\n\t\t\tself.total_mass += b.mass\n\n\t\tself.body = Body(*body_joints, self.joint_pos, freq=freq, name=\"body\")\n\n\t\tself.body.get_kinematics(\n\t\t\tnp.stack([self.joint_pos[:, body_joints[0]], self.joint_pos[:, body_joints[1]]], axis=1))\n\t\tself.body.get_dynamics()\n\n\t\tself.total_mass += self.body.mass\n\n\t\t# Paw parameters\n\t\tm = self.total_mass\n\t\tpaw_d = self.model.paws\n\t\tself.L0_paws = {\"front\": paw_d[\"L0_front\"] * m, \"rear\": 
paw_d[\"L0_rear\"] * m}\n\t\tself.k_paws = {\"front\": paw_d[\"k_front\"] * m, \"rear\": paw_d[\"k_rear\"] * m ** paw_d[\"k_rear_prop\"]}\n\t\tself.c_paws = {\"front\": paw_d[\"c_front\"] * m, \"rear\": paw_d[\"c_rear\"] * m}\n\n\t\t# if self.model.equation_weighting['Paw spring'] > 0:\n\t\tself.set_paw_equilibrium()\n\n\t\tself.get_dynamics()\n\t\tself.leg_spring_joints = leg_spring_joints\n\n\t\tself.calc_leg_lengths()\n\n\t\tself.equation_weighting = model.equation_weighting\n\n\tdef get_dynamics(self):\n\t\t\"\"\"Gets dynamics of centre of mass of each bone & body\"\"\"\n\t\tfor bone in self.target_bones:\n\t\t\tbone.get_kinematics(self.joint_pos[:, [bone.start, bone.end]])\n\t\t\tbone.get_dynamics()\n\n\t\tbody = self.body\n\t\tbody.get_kinematics(\n\t\t\tnp.stack([self.joint_pos[:, body.start_joints], self.joint_pos[:, body.end_joints]], axis=1))\n\t\tbody.get_dynamics()\n\n\tdef calculate_forces(self, n_frame, report_equations=True):\n\t\t\"\"\"\n\t\tSets up a system of linear equations governing the motion of the skeleton at a given frame.\n\n\t\tThese equations are:\n\n\t\t- FREE JOINTS:\n\t\tThe torques at free joints are zero. Free joints are joints only connected to one bone, on the end of the body eg the feet\n\n\t\t- INERTIA:\n\t\tOn each bone, the sum of the two joint forces is equal to the mass * acceleration of the bone\n\n\t\t- ROTATION:\n\t\tOn each bone, the net torque about the bone is equal to the I * alpha_g of the bone\n\n\t\t- BODY:\n\t\tThe body is set up as a slightly different type of bone, in which it has several joints connected at either end, and its position is dictated by all of those joints.\n\t\tSee the code for it below, it has its own set of inertial and rotational equations.\n\n\t\tThis is set up as a least squares problem Ax = b, where A is a matrix of coefficients to multiply the unknowns by,\n\t\tx is the unknowns (in the form [F_1_x, F_1_y, F_1_z, F_2_x, ... T_1, T, ...]\n\t\tb is the result of the equations.\n\n\t\tA weighting is also applied to each row to weight the least squares problem (eg to priorities free joint equations)\n\n\t\tThe problem also has bounds applied to it. For now, these bounds are simply that foot joint vertical reaction forces are non negative.\n\n\t\tImprovements:\n\t\t- Replace the current spinal system with a large non axisymmetric cylinder to represent the body\n\t\t- Add a sphere to represent the head\n\n\t\t\"\"\"\n\n\t\t# Consult report for explanation of system\n\n\t\tA = []\n\t\tb = []\n\t\tweights = [] # Collect weightings for each equation as they are added to the system\n\t\tequation_weighting = self.equation_weighting\n\n\t\t# Reasonable bounds for each force, and for each torque. 
Current limits set at 10 * weight for mass, 10 * mass at one metre for torque\n\t\tmax_force = 3 * self.total_mass * g\n\t\tmax_torque = 3 * self.total_mass * g\n\t\t# bounds can be adjusted further for specific joints (eg no downards reaction at the feet)\n\t\tbounds = [(-max_force, max_force)] * (3 * self.n_joints) + [(-max_torque, max_torque)] * (self.n_joints)\n\n\t\tdef A_row(vals={}):\n\t\t\t\"\"\"Returns a row of 0s length 4 * self.n_joints, with other vectors in any indices in vals.\n\t\t\tvals is a dict of index:vector\"\"\"\n\t\t\trow = [0.0] * 4 * self.n_joints\n\t\t\tfor index, val in vals.items():\n\t\t\t\trow[index] = val\n\t\t\treturn row\n\n\t\tdef add_blank_row():\n\t\t\tA.append(A_row({}))\n\t\t\tb.append(0)\n\t\t\tweights.append(0)\n\n\t\tdef add_n_blank_rows(n=1):\n\t\t\tfor i in range(n): add_blank_row()\n\n\t\tnull, unit, g_vec = Vector(0, 0, 0), Vector(1, 1, 1), Vector(0, 0, -g)\n\n\t\tn_joints = self.n_joints\n\n\t\tdef get_index(joint, dimension=0, is_force=True):\n\t\t\t\"\"\"Get correct index of D\"\"\"\n\t\t\treturn (3 * n_joints * int(not is_force)) + ([1, 3][is_force] * joint) + dimension\n\n\t\t# dimension = 0 for x, 1 for y, 2 for z\n\n\t\t# First, add the equations to show that the torques in each of the foot joints are zero\n\t\tfor no_torque_joint in self.no_torque_joints:\n\t\t\t# Set up the equation 1 * tau_{foot_joint} = 0\n\t\t\t# BOUNDARY CONDITIONS ARE FIXED, RATHER THAN AN ADDITIONAL EQUATION. SO INCORPORATE THEM INTO BOUNDS\n\t\t\tbounds[get_index(no_torque_joint, is_force=False)] = (0, 1e-10)\n\n\t\tfor no_reaction_joint in self.no_reaction_joints: # BC : no reactions\n\t\t\tfor dim in [0, 1, 2]:\n\t\t\t\tbounds[get_index(no_reaction_joint, dimension=dim, is_force=True)] = (0, 1e-10)\n\n\t\tfor foot_joint in self.foot_joints:\n\t\t\t## If the feet are a certain amount off the ground for that foot, also assign the reaction forces to be zero\n\t\t\tbone_name = self.get_foot_joint_from_index[foot_joint]\n\n\t\t\tend = bone_name.split(\" \")[1] # get 'front' or 'rear'\n\t\t\tL0 = self.L0_paws[end] # get stiffness from 'front' or 'rear' in bone name\n\t\t\t# L0 = self.paw_equilibrium_values[foot_joint]\n\n\t\t\tk_paw = self.k_paws[end]\n\t\t\tc_paw = self.c_paws[end]\n\n\t\t\tpaw_disp = self.paw_disps[foot_joint][n_frame]\n\n\t\t\tpaw_off_ground = self.joint_pos[n_frame, foot_joint, 2] >= L0 # BC: no reaction in foot off ground\n\t\t\tpaw_off_ground = paw_disp == 0\n\n\t\t\tif paw_off_ground: # BC: no reaction in foot off ground\n\t\t\t\tfor dim in [0, 1, 2]:\n\t\t\t\t\tbounds[get_index(foot_joint, dimension=dim, is_force=True)] = (0, 1e-10)\n\n\t\t\t\tadd_n_blank_rows(4) # for consistency of number of eqns\n\n\t\t\telse: # If paw near ground, add force due to spring\n\n\t\t\t\theight = self.unsmoothed_data[n_frame, foot_joint, 2]\n\n\t\t\t\teps = L0 - height # min((L0 - height), L0/2)\n\t\t\t\teps_dot = self.joint_vel[n_frame, foot_joint, 2]\n\n\t\t\t\tF_damp = 0 # c_paw * eps_dot\n\n\t\t\t\tif self.model.equation_weighting['Paw spring'] > 0:\n\t\t\t\t\t## PAW SPRING MODEL\n\t\t\t\t\teps = paw_disp\n\t\t\t\t\tF_spring = k_paw * eps + c_paw * eps_dot\n\n\t\t\t\t\tif foot_joint != 20:\n\t\t\t\t\t\tA.append(A_row({get_index(foot_joint, dimension=2, is_force=True): 1}))\n\t\t\t\t\t\tb.append(F_spring + F_damp)\n\t\t\t\t\t\tweights.append(equation_weighting[\"Paw spring\"])\n\n\t\t\t\tif self.model.equation_weighting['Leg spring'] > 0:\n\t\t\t\t\t## LEG SPRING MODEL\n\t\t\t\t\tK = 3000 if end == \"front\" else 2000\n\t\t\t\t\tfor dim in [0, 1, 
2]:\n\t\t\t\t\t\t# component = self.leg_vecs[foot_joint][n_frame][dim]\n\t\t\t\t\t\tF_spring = self.leg_disps[foot_joint][n_frame] * K # * component\n\t\t\t\t\t\tA.append(A_row({get_index(foot_joint, dimension=dim, is_force=True): 1}))\n\t\t\t\t\t\tb.append(F_spring + F_damp)\n\t\t\t\t\t\tweights.append(equation_weighting[\"Leg spring\"])\n\n\t\t\t\t# Set bounds for foot joints to only have positive vertical reactions\n\t\t\t\tbounds[get_index(foot_joint, dimension=2, is_force=True)] = (0, max_force)\n\t\t\t\tbounds[get_index(foot_joint, dimension=1, is_force=True)] = (0, 1e-10) # set Fy=0\n\n\t\tfor bone in self.target_bones:\n\t\t\tj_1, j_2 = bone.start, bone.end\n\t\t\tx_1, x_2 = bone.X[n_frame]\n\n\t\t\t# F_1 + F_2 + F_grav = F_net\n\t\t\tF_net = bone.F_net[n_frame]\n\t\t\tfor dim in [0, 1, 2]:\n\t\t\t\tA.append(A_row({get_index(j_1, dim): 1, get_index(j_2, dim): - 1}))\n\t\t\t\tb.append((F_net - bone.mass * g_vec)[dim])\n\t\t\t\tweights.append(equation_weighting[\"Inertial\"])\n\n\t\t\ttau_net = bone.tau_net[n_frame]\n\t\t\tx_g = bone.XG[n_frame]\n\t\t\tr_1, r_2 = (x_1 - x_g), (x_2 - x_g)\n\n\t\t\t# direction of each T is perpendicular to the bones that the joint is on\n\t\t\tadjacent_1_bone = [b for b in self.target_bones if b.end == j_1 and b != bone]\n\t\t\tif len(adjacent_1_bone) == 1: # if there is an adjacent bone\n\t\t\t\tadj_bone = adjacent_1_bone[0]\n\t\t\t\tT_1_dir = Vector(*r_1).cross((adj_bone.X[n_frame, 1] - adj_bone.XG[n_frame])).unit()\n\t\t\tif len(adjacent_1_bone) == 0 or np.isnan(T_1_dir).any(): # if no adjacent, or if above calc causes error\n\t\t\t\tT_1_dir = (0, 1, 0) # Improve later, for now say all torques about y axis\n\n\t\t\tadjacent_2_bone = [b for b in self.target_bones if b.start == j_2 and b != bone]\n\t\t\tif len(adjacent_2_bone) == 1: # if there is an adjacent bone\n\t\t\t\tadj_bone = adjacent_2_bone[0]\n\t\t\t\tT_2_dir = Vector(*r_2).cross((adj_bone.X[n_frame, 0] - adj_bone.XG[n_frame])).unit()\n\t\t\tif len(adjacent_2_bone) == 0 or np.isnan(T_2_dir).any(): # if no adjacent, or if above calc causes error\n\t\t\t\tT_2_dir = (0, 1, 0) # Improve later, for now say all torques about y axis\n\n\t\t\tfor dim in [0, 1, 2]:\n\t\t\t\t# This loop essentially writes out the following equations into A and b for each dimension (x,y,z):\n\t\t\t\t# r1 x F1 + r2 x F2 + T1 + T2 = T_net\n\n\t\t\t\t# The cross product of r = (x,y,z) and F = (Fx, Fy, Fz) yields (Fz*y - Fy*z, ...)\n\t\t\t\t# Take the x component, x -> Fz*y - Fy*z\n\t\t\t\t# Notice that Fy is negative, and Fz is positive. 
This is always true, that, for the forces, one lower dimension than the current is positive, and one higher is negative (cyclical relations)\n\t\t\t\t# use this below\n\n\t\t\t\t# Get dim above and below, wrapping round for below x and above z\n\t\t\t\tdim_below = (dim - 1) % 3\n\t\t\t\tdim_above = (dim + 1) % 3\n\n\t\t\t\tcoeff_dict = {\n\t\t\t\t\tget_index(j_1, dim): 0,\n\t\t\t\t\t# eg no effect of F_x in the x directional torque (not relevant statement, only here for readability)\n\t\t\t\t\tget_index(j_1, dim_above): - r_1[dim_below], # eg multiply - z by Fy in the x direction\n\t\t\t\t\tget_index(j_1, dim_below): r_1[dim_above], # eg multiply y by Fz in the x direction\n\n\t\t\t\t\t# Reversed polarity for joint 2 as the desired force is - F2\n\t\t\t\t\tget_index(j_2, dim_above): r_2[dim_below],\n\t\t\t\t\tget_index(j_2, dim_below): - r_2[dim_above],\n\n\t\t\t\t\t# Add the torques on each joint\n\t\t\t\t\tget_index(j_1, is_force=False): T_1_dir[dim],\n\t\t\t\t\tget_index(j_2, is_force=False): -T_2_dir[dim]\n\n\t\t\t\t}\n\n\t\t\t\tA.append(A_row(coeff_dict))\n\t\t\t\tb.append(tau_net[dim])\n\t\t\t\tweights.append(equation_weighting[\"Rotational\"])\n\n\t\t### SOLVE FORCES ON BODY. Note body defined so all joint forces/torques on it are positive\n\t\tbody = self.body\n\t\tF_net = body.F_net[n_frame]\n\n\t\t# BODY INERTIAL FORCES\n\t\tfor dim in [0, 1, 2]:\n\t\t\tA.append(A_row({get_index(j, dim): 1 for j in self.body.start_joints + self.body.end_joints}))\n\t\t\tb.append((F_net - body.mass * g_vec)[dim])\n\t\t\tweights.append(equation_weighting[\"Inertial\"])\n\n\t\t# BODY ROTATIONAL FORCES - same as for bones\n\t\tx_g = body.XG[n_frame]\n\t\ttau_net = body.tau_net[n_frame]\n\n\t\t# Improve above later, for now say all torques about y axis\n\t\tT_dir = (0, 1, 0)\n\n\t\tfor dim in [0, 1, 2]:\n\t\t\tcoeff_dict = {}\n\t\t\tfor joint in body.start_joints + body.end_joints:\n\t\t\t\tx_j = self.joint_pos[n_frame, joint]\n\t\t\t\tr_j = (x_j - x_g) # position vector to centre\n\n\t\t\t\t# Get dim above and below, wrapping round for below x and above z\n\t\t\t\tdim_below, dim_above = (dim - 1) % 3, (dim + 1) % 3\n\n\t\t\t\tcoeff_dict[get_index(joint, dim_above)] = -r_j[dim_below] # eg multiply - z by Fy in the x direction\n\t\t\t\tcoeff_dict[get_index(joint, dim_below)] = r_j[dim_above] # eg multiply y by Fz in the x direction\n\n\t\t\t\tcoeff_dict[get_index(joint, is_force=False)] = T_dir[dim] # Add pure torque of pin\n\n\t\t\tA.append(A_row(coeff_dict))\n\t\t\tb.append(tau_net[dim])\n\t\t\tweights.append(equation_weighting[\"Rotational\"])\n\n\t\t# print each line of the equations defined by A, b, with the final result\n\t\t# Only print variables with both non-zero values, and non-zero coefficients\n\t\tif report_equations:\n\t\t\tprint(f\"----Frame {n_frame}----\")\n\t\t\tparams = []\n\n\t\t\tfor joint in range(self.n_joints):\n\t\t\t\tfor dim in \"xyz\":\n\t\t\t\t\tparams.append(F\"F_{joint}_{dim}\") # Add forces by joint\n\n\t\t\tfor joint in range(self.n_joints):\n\t\t\t\tparams.append(F\"T_{joint}\") # Add torques by joint\n\n\t\t\tfor n, (coeffs, result) in enumerate(zip(A, b)):\n\t\t\t\ts = []\n\t\t\t\tfor j, (coeff, param) in enumerate(zip(coeffs, params)):\n\t\t\t\t\tif coeff != 0:\n\t\t\t\t\t\ts.append(f\"{round(coeff, 3)} * {param}\")\n\n\t\t\t\t# b_actual = np.dot(A[n], D)\n\t\t\t\t# pct_error = abs(100 * (b_actual - result) / b_actual)\n\t\t\t\tif n <= 7:\n\t\t\t\t\tprint(f\"{' + '.join(s)} = {round(result, 3)}\") # ({round(b_actual, 3)}) [{round(pct_error, 2)}%]\")\n\n\t\treturn A, 
b, weights, bounds\n\n\tdef solve_forces(self, report_equations=False, end_frames_disregarded=5, prefix=\"\",\n\t\t\t\t\t save=True):\n\t\t\"\"\"Solves the forces at each frame for the system, collects them and saves them to .npy files.\n\n\t\tNote: Currently, due to smoothing, the first 5 and last 5 frames are disregarded\"\"\"\n\n\t\tself.get_dynamics()\n\t\tn_joints = self.n_joints\n\n\t\tif report_equations:\n\t\t\tprint(\"Solving system...\")\n\t\t\tprint(f\"Total mass {round(self.total_mass, 2)} kg.\")\n\n\t\t# If dir doesn't exist, make it\n\t\tdir = path_join(DataSources.dynamics_data, self.name)\n\t\tif self.name not in os.listdir(DataSources.dynamics_data):\n\t\t\tos.mkdir(dir)\n\n\t\tforces, torques = [], []\n\n\t\tf_shape, t_shape = (self.n_joints, 3), (self.n_joints,)\n\t\t# Add zeros either end due to not being able to calculate for the first or last 2 frames\n\t\tfor i in range(end_frames_disregarded):\n\t\t\tforces.append(np.zeros(f_shape))\n\t\t\ttorques.append(np.zeros(t_shape))\n\n\t\tcalc_forces = []\n\t\tcalc_torques = []\n\n\t\tprogress = tqdm(total=self.n_frames - 2 * end_frames_disregarded)\n\t\tfor n_frame in range(end_frames_disregarded, self.n_frames - end_frames_disregarded):\n\t\t\tA, b, weights, bounds = self.calculate_forces(n_frame, report_equations=report_equations)\n\n\t\t\tD = weighted_bound_least_squares(A, b, weights, bounds, rcond=None)\n\n\t\t\tf, tau = D[:(3 * n_joints)], D[(3 * n_joints):]\n\n\t\t\tf, tau = f.reshape((n_joints, 3)), tau.reshape((n_joints))\n\n\t\t\tcalc_forces.append(f)\n\t\t\tcalc_torques.append(tau)\n\n\t\t\tprogress.update()\n\n\t\tforces[end_frames_disregarded: - end_frames_disregarded] = calc_forces\n\t\ttorques += calc_torques\n\n\t\tfor i in range(end_frames_disregarded):\n\t\t\tforces.append(np.zeros(f_shape))\n\t\t\ttorques.append(np.zeros(t_shape))\n\n\t\tif save:\n\t\t\tnp.save(path_join(dir, prefix + \"forces.npy\"), forces)\n\t\t\tnp.save(path_join(dir, prefix + \"torques.npy\"), torques)\n\n\t\treturn np.array(forces), np.array(torques)\n\n\tdef get_com_position(self):\n\t\t\"\"\"Calculates the position of the centre of mass of the whole system at each timestep\"\"\"\n\t\treturn sum(b.XG * b.mass for b in self.target_bones + [self.body]) / self.total_mass\n\n\tdef return_equations(self, end_frames_disregarded=5):\n\t\t\"\"\"For each frame, return the equation vector b\"\"\"\n\t\tself.get_dynamics()\n\t\tbs = []\n\n\t\tfor n_frame in range(end_frames_disregarded, self.n_frames - end_frames_disregarded):\n\t\t\tA, b, weights, bounds = self.calculate_forces(n_frame, report_equations=False)\n\t\t\tbs.append(b)\n\n\t\treturn np.array(bs)\n\n\tdef set_paw_equilibrium(self):\n\t\t\"\"\"Get paw equilibrium from mocap data by finding the drop of the paw.\n\t\tThis method will work for the current dataset, but is likely not robust, so can be replaced with\n\t\ta better method of finding the paw equilibrium at a later date\"\"\"\n\n\t\tif self.is_mocap:\n\t\t\tpaw_z_heights = self.unsmoothed_data[:, self.foot_joints, 2]\n\n\t\telse:\n\t\t\tpaw_z_heights = self.unsmoothed_data[:, self.foot_joints, 2]\n\n\n\t\tself.paw_disps = {} # paw joint: displacement over time, for paw spring model\n\n\t\tmin_contacts_detected = 3 # minimum requirement to use peak detection mode\n\n\t\tplot = True\n\t\tif plot:\n\t\t\tfig, axes = plt.subplots(nrows=2, ncols=2)\n\n\t\tfootfall_detector = FootfallDetector(train=False, load=True, name=[\"smal\", \"mocap\"][self.is_mocap])\n\t\tfor n, paw in enumerate(self.foot_joints):\n\t\t\tcontact_ends_failed 
= False\n\t\t\tdisp = np.zeros((self.n_frames)) # will give eps - the displacement of the paw from equilibrium\n\t\t\t# for when the paw is in contact with the ground\n\n\t\t\tZ = paw_z_heights[:, n]\n\n\t\t\ton_ground = footfall_detector.process_clip(Z)\n\t\t\ton_ground_idxs = np.where(on_ground > 0)[0]\n\n\t\t\tif plot:\n\t\t\t\taxes[n // 2, n % 2].plot(Z.mean() * (on_ground), color=\"red\", alpha=0.3)\n\n\t\t\tmin_footfall_width = 3 # 3 frames long minimum to count as a footfall\n\t\t\tfootfalls = consecutive(on_ground_idxs)\n\t\t\ttrigger_height = np.percentile(np.array([Z[ff].max() for ff in footfalls]), 25) # mean trigger height\n\t\t\tfor footfall in footfalls:\n\t\t\t\tif len(footfall) > min_footfall_width:\n\t\t\t\t\t# disp[footfall] = Z[footfall].max() - Z[footfall] # old\n\t\t\t\t\tdisp[footfall] = np.clip(trigger_height - Z[footfall], a_min=0, a_max=None)\n\n\t\t\tself.paw_disps[paw] = disp\n\n\t\t\tif plot:\n\t\t\t\tax = axes[n // 2, n % 2]\n\t\t\t\tax.plot(Z)\n\n\t\t\t\tZ_on_ground = Z.copy()\n\t\t\t\tZ_on_ground[disp == 0] = np.nan\n\t\t\t\tax.plot(Z_on_ground, color=\"green\")\n\t\t\t\tax.plot(disp)\n\t\t\t\tZ_smoothed = self.joint_pos[:, paw, 2]\n\n\t\t\t\tax.set_title(n)\n\n\t\tif plot:\n\t\t\tplt.show(block=False)\n\t\t\tplt.draw()\n\t\t\tplt.pause(1e-8)\n\n\tdef view_ground_displacements(self, deriv=0):\n\t\t\"\"\"Plot and show a graph of vertical displacement against frames for each paw - identifying L0 for each paw\"\"\"\n\n\t\tfig, axes = plt.subplots(nrows=4)\n\t\tfor n, j in enumerate(self.foot_joints):\n\t\t\tlabel = foot_joint_labels[n]\n\t\t\tax = axes[n]\n\t\t\tif deriv == 0:\n\t\t\t\tX = self.joint_pos[:, j, 2]\n\t\t\t\tX_unsmoothed = self.unsmoothed_data[:, j, 2]\n\t\t\t\tax.plot(X)\n\t\t\t\tax.plot(X_unsmoothed, alpha=.6)\n\t\t\t\t# ax.axhline(self.paw_equilibrium_values[j], ls = \"--\")\n\t\t\t\tax.axhline(self.L0_paws[label.split(\" \")[0]])\n\t\t\telif deriv == 1:\n\t\t\t\tax.plot(self.joint_vel[:, j, 2])\n\n\t\t\tax.set_title(label)\n\n\t\tplt.show()\n\n\tdef view_com_displacements(self, deriv=0):\n\t\t\"\"\"Plot and show graph of X, Y, and Z motion of CoM of dog.\n\t\tIf deriv > 0, plot that derivative of the displacement\"\"\"\n\n\t\tfig, ax = plt.subplots()\n\t\tcom_data = self.get_com_position()\n\t\tif deriv > 0:\n\t\t\tcom_data = nth_time_deriv(com_data, 1 / self.freq, n=deriv)\n\n\t\tfor i in [0, 1, 2]:\n\t\t\tax.plot(com_data[:, i], label=\"xyz\"[i])\n\n\t\tax.legend()\n\t\tplt.show()\n\n\tdef calc_leg_lengths(self):\n\t\t\"\"\"Uses the compliant-legged walking model estimation to work out the average length of legs.\n\t\tAssume legs are undeformed while off ground. 
Work out avg distance from leg to COM\"\"\"\n\n\t\tself.leg_disps = {} # length of leg over time for each paw\n\t\tself.leg_vecs = {} # normalised vector of leg spring direction for each paw\n\n\t\tplot = True\n\t\tif plot: fig, axes = plt.subplots(nrows=2, ncols=2, sharex=\"all\", sharey=\"row\")\n\n\t\tfor n, paw in enumerate(self.foot_joints):\n\t\t\tis_front = n < 2 # Assumes order of f left, f right, r left, r right\n\n\t\t\ttol = 1e-3\n\t\t\ton_ground = self.paw_disps[paw] > tol\n\t\t\toff_ground = self.paw_disps[paw] <= tol\n\n\t\t\t# centre_of_rot = self.body.XG[:]#self.body.X[:, int(is_front)]\n\t\t\t# centre_of_rot = self.unsmoothed_data[:, self.body_joints[is_front][n%2]]\n\t\t\tif self.is_mocap:\n\t\t\t\tcentre_of_rot = self.unsmoothed_data[:, self.leg_spring_joints[n]]\n\t\t\t\tpaw_pos = self.unsmoothed_data[:, paw]\n\n\t\t\telse:\n\t\t\t\tcentre_of_rot = self.unsmoothed_data[:, self.leg_spring_joints[n]]\n\t\t\t\tpaw_pos = self.unsmoothed_data[:, paw]\n\n\t\t\tX, Z = np.swapaxes(centre_of_rot[:, [0, 2]], 0, 1) # get X, Z position of CoM\n\t\t\tX_PAW, Z_PAW = np.swapaxes(paw_pos[:, [0, 2]], 0, 1) # get X, Z position of CoM\n\n\t\t\tTHETA = np.arctan((X_PAW - X) / (Z - Z_PAW)) # angle between spring and vertical\n\n\t\t\tL = ((X - X_PAW) ** 2 + (Z - Z_PAW) ** 2) ** .5\n\t\t\tL0 = (L).max()\n\n\t\t\tz_disp = (L - L0) * np.cos(THETA)\n\t\t\tx_disp = (L - L0) * np.sin(THETA)\n\n\t\t\t# get z displacement by footfall\n\t\t\tdisp = np.zeros(self.n_frames)\n\n\t\t\t# if self.is_mocap:\n\t\t\tfor ff in consecutive(np.where(on_ground)[0]):\n\t\t\t\tif len(ff) < 3: continue # min width of footfall required\n\t\t\t\tdisp[ff] = z_disp[ff].max() - z_disp[ff]\n\n\t\t\t# else:\n\t\t\t# disp = -z_disp\n\n\t\t\tself.leg_disps[paw] = disp\n\n\t\t\tif plot:\n\t\t\t\tax = axes[n // 2, n % 2]\n\n\t\t\t\t# ax.plot(L)\n\t\t\t\tax.plot(L - L0)\n\t\t\t\tax.plot(disp, color=\"green\")\n\n\t\tif plot:\n\t\t\tplt.tight_layout()\n\t\t\t# plt.show()\n\t\t\tplt.show(block=False)\n\t\t\tplt.draw()\n\t\t\tplt.pause(1e-8)\n\n\ndef norm_kin_data(kin_data, targ_markers=None):\n\t\"\"\"Normalise kinematic data.\n\tIf targ_markers given, normalise so these markers are at desired height\"\"\"\n\n\tnorm_height = 0.4 # 0.635 # fixed to Ally height for now\n\n\t# scale so minimum is at (0,0,0)\n\tfor dim in [0, 1, 2]:\n\t\tkin_data[:, :, dim] -= kin_data[:, :, dim].min()\n\n\tif targ_markers is None:\n\t\tkin_data = norm_height * kin_data / np.max(kin_data[:, :, 2])\n\n\telif targ_markers is not None:\n\t\theight_target = kin_data[:, targ_markers, 2].mean()\n\t\tkin_data = norm_height * kin_data / height_target\n\n\treturn kin_data\n\n\ndef get_dyn_data(dynamic_src, clip_length, mass, is_mocap=True, target_freq=100):\n\t\"\"\"Loads and returns kinematic data\"\"\"\n\n\tforce_plate_data, force_plate_tdelay = load_force_plate_data(dynamic_src, is_mocap)\n\traw_dyn_data = force_plate_data\n\traw_dyn_data *= 1 / (mass * 9.81)\n\n\t# resample if requested\n\tif target_freq != freq_forceplate:\n\t\ttarget_frames = int(len(raw_dyn_data) * target_freq / freq_forceplate)\n\t\tdyn_data = signal.resample(raw_dyn_data, target_frames)\n\n\t\t# this resampling causes a jumpiness for the periods of zero value. 
Fix that here:\n\t\ttol = 1e-4\n\t\tfor paw in range(dyn_data.shape[1]):\n\t\t\t# get indices where should be 0\n\t\t\tantifootfalls = consecutive(np.where(raw_dyn_data[:, paw] < tol)[0])\n\t\t\tmin_width = 10 # in frames\n\n\t\t\tfor aff in antifootfalls:\n\t\t\t\tif len(aff) < min_width: continue\n\t\t\t\tstart, end = aff[0] * target_freq / freq_forceplate, aff[-1] * target_freq / freq_forceplate\n\t\t\t\t# ^ start and end indices, in remapped frame\n\t\t\t\tdyn_data[int(start):int(end), paw] = 0 # set to 0\n\n\t\tfreq = target_freq\n\n\telse:\n\t\tfreq = freq_forceplate\n\t\tdyn_data = raw_dyn_data\n\n\tframe_delay = int(freq * force_plate_tdelay)\n\tn_frames_forceplate = int(clip_length * freq) # number of frames for forceplate to be same time length as mocap\n\n\tif frame_delay == 0:\n\t\treturn dyn_data[:n_frames_forceplate]\n\n\tif frame_delay > 0: # crop forceplate data\n\t\treturn dyn_data[frame_delay: frame_delay + n_frames_forceplate] # crop forceplate data to match mocap/SMAL data\n\n\telse: # fdelay <0, pad forceplate data\n\t\treturn np.pad(dyn_data, ((int(-frame_delay), 0), (0, 0)))[:n_frames_forceplate]\n\n\nkin_src_to_solver_name = lambda s: s.replace(\"/\", \" \").replace(\" \", \"_\").replace(\".c3d\", \"\")\n\n\ndef load_solver(kin_src, clip_length, mocap=True, resample_freq=100):\n\tif mocap:\n\t\tjoint_data = C3DData(ax=None, src=kin_src, interpolate=True, crop=clip_length,\n\t\t\t\t\t\t\t fix_rotations=\"3 kph\" in kin_src) # only fix rotations for 3 kph for now\n\n\telse:\n\t\tjoint_data = SMALData(kin_src, freq=30, norm=True, crop=clip_length, smooth=True)\n\n\tjoint_data.resample_at(resample_freq) ### TRY RESAMPLING DATA TO 100 Hz\n\ttarget_bones, body_joints, no_torque_joints, leg_spring_joints = joint_data.generate_skeleton_mapping()\n\n\t# Normalise data based on z data, so that the dog is roughly 0.5m high. Also smooth data\n\tkin_data = np.array(joint_data.all_data)\n\tkin_data = norm_kin_data(kin_data, targ_markers=leg_spring_joints)\n\n\tsolver_kwargs = dict(target_bones=target_bones,\n\t\t\t\t\t\t body_joints=body_joints, no_torque_joints=no_torque_joints,\n\t\t\t\t\t\t foot_joints=no_torque_joints, leg_spring_joints=leg_spring_joints,\n\t\t\t\t\t\t freq=joint_data.freq,\n\t\t\t\t\t\t name=kin_src_to_solver_name(kin_src))\n\n\tsolver = InverseDynamicsSolver(joint_data=kin_data, **solver_kwargs, is_mocap=mocap)\n\tprint(f\"Solver loaded. Mass = {solver.total_mass:.1f} kg.\")\n\treturn solver\n"
] | [
[
"scipy.signal.savgol_filter",
"scipy.signal.resample"
]
] |
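A minimal, hypothetical sketch of the two scipy.signal APIs tagged for this row, run on a synthetic signal (the window/order values are illustrative, not taken from the source file):

```python
import numpy as np
from scipy.signal import savgol_filter, resample

t = np.linspace(0, 1, 200)
noisy = np.sin(2 * np.pi * 5 * t) + 0.1 * np.random.randn(200)
smoothed = savgol_filter(noisy, window_length=11, polyorder=3)  # Savitzky-Golay smoothing
resampled = resample(smoothed, 100)  # Fourier-based resampling to 100 samples
```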
gosticks/body-pose-animation | [
"eb1b5876a845f277d43bfc18dcd48c4a9c694c06"
] | [
"utils/video.py"
] | [
"from dataset import SMPLyDataset\nimport pickle\nfrom typing import Tuple\nfrom model import SMPLyModel\nfrom renderer import DefaultRenderer\nimport cv2\nfrom tqdm import tqdm\nimport numpy as np\nfrom scipy import interpolate\n\n\ndef make_video(images, video_name: str, fps=30, ext: str = \"mp4\", post_process_frame=None):\n images = np.array(images)\n width = images.shape[2]\n height = images.shape[1]\n\n fourcc = 0\n if ext == \"mp4\":\n fourcc = cv2.VideoWriter_fourcc(*'MP4V')\n\n video_name = video_name + \".\" + ext\n\n video = cv2.VideoWriter(\n video_name, fourcc, fps, (width, height), True)\n\n for idx in tqdm(range(len(images))):\n img = images[idx]\n im_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n if post_process_frame is not None:\n img_rgb = post_process_frame(img=im_rgb, idx=idx)\n\n video.write(im_rgb)\n\n video.release()\n print(\"video saved to:\", video_name)\n\n\ndef video_from_pkl(filename, video_name, config, ext: str = \"mp4\"):\n with open(filename, \"rb\") as fp:\n model_outs = pickle.load(fp)\n save_to_video(model_outs, video_name, config)\n\n\ndef save_to_video(\n sample_output: Tuple,\n video_name: str,\n config: object,\n fps=30,\n include_thumbnail=True,\n thumbnail_size=0.2,\n start_frame_offset=0,\n dataset: SMPLyDataset = None,\n interpolation_target=None\n):\n \"\"\"\n Renders a video from pose, camera tuples. Additionally interpolation can be used to smooth out the animation\n\n Args:\n sample_output (Tuple): A tuple of body pose vertices and a camera transformation\n video_name (str): name for the resulting video file (can also be a path)\n config (object): general run config\n fps (int, optional): animation base fps. Defaults to 30.\n interpolation_target (int, optional): expand animation fps via interpolation to this target. 
Defaults to 60.\n \"\"\"\n r = DefaultRenderer(\n offscreen=True\n )\n r.start()\n\n model_anim = SMPLyModel.model_from_conf(config)\n\n if interpolation_target is not None:\n if interpolation_target % fps != 0:\n print(\"[error] interpolation target must be a multiple of fps\")\n return\n inter_ratio = int(interpolation_target / fps)\n num_intermediate = inter_ratio - 1\n sample_output = interpolate_poses(sample_output, num_intermediate)\n else:\n sample_output = [\n (\n out.vertices.detach().cpu().numpy()[0],\n cam\n ) for out, cam in sample_output]\n frames = []\n print(\"[export] rendering animation frames...\", sample_output[0][0].shape)\n\n # just use the first transform\n cam_transform = sample_output[0][1]\n\n for vertices, cam_trans in tqdm(sample_output):\n r.render_model_geometry(\n faces=model_anim.faces,\n vertices=vertices,\n pose=cam_trans # cam_transform,\n )\n frames.append(r.get_snapshot())\n\n target_fps = fps\n if interpolation_target is not None:\n target_fps = interpolation_target\n\n def post_process_frame(img, idx: int):\n if not include_thumbnail:\n return img\n # account for start from frames not zero\n idx = start_frame_offset + idx\n frame_idx = idx\n if interpolation_target is not None:\n # account for possible interpolation\n frame_idx = int(idx / inter_ratio)\n img_path = dataset.get_image_path(frame_idx)\n overlay = cv2.imread(img_path)\n\n if overlay is None:\n print(\"[error] image could not be \", img_path)\n return img\n\n overlay = cv2.resize(\n overlay,\n dsize=(\n int(overlay.shape[1] * thumbnail_size),\n int(overlay.shape[0] * thumbnail_size)\n ))\n img[0:overlay.shape[0], 0:overlay.shape[1]] = overlay\n return img\n\n make_video(frames, video_name, target_fps,\n post_process_frame=post_process_frame)\n\n\ndef make_video_with_pip(frames, pip_image_path, video_name: str, fps=30, ext: str = \"mp4\", image_size=0.2):\n \"\"\"renders a video with a pip frame in the corner\n \"\"\"\n\n def post_process_frame(img, idx: int):\n overlay = cv2.imread(pip_image_path)\n\n if overlay is None:\n print(\"[error] image could not be \", pip_image_path)\n return img\n\n overlay = cv2.resize(\n overlay,\n dsize=(\n int(overlay.shape[1] * image_size),\n int(overlay.shape[0] * image_size)\n ))\n img[0:overlay.shape[0], 0:overlay.shape[1]] = overlay\n return img\n\n make_video(frames, video_name, fps,\n post_process_frame=post_process_frame)\n\n\ndef interpolate_poses(poses, num_intermediate=5):\n \"\"\"\n Interpolate vertices and cameras between pairs of frames by adding intermediate results\n\n :param poses: optimized poses\n :param num_intermediate: amount of intermediate results to insert between each pair of frames\n :return: interpolated poses, list of tuples (body_pose, camera_pose)\n \"\"\"\n new_poses = []\n for i in range(len(poses) - 1):\n if len(poses) < 2:\n return poses\n else:\n # Shape of one matrix of vertices = torch.Size([1, 10475, 3])\n pose_1 = poses[i][0].vertices.detach().cpu().numpy()\n pose_2 = poses[i + 1][0].vertices.detach().cpu().numpy()\n poses_pair = np.concatenate((pose_1, pose_2), axis=0)\n\n camera_1 = np.expand_dims(poses[i][1], axis=0)\n camera_2 = np.expand_dims(poses[i + 1][1], axis=0)\n camera_pair = np.concatenate((camera_1, camera_2), axis=0)\n\n x = np.arange(poses_pair.shape[0])\n f1 = interpolate.interp1d(x, poses_pair, axis=0)\n f2 = interpolate.interp1d(x, camera_pair, axis=0)\n\n evenly_spaced_points = np.linspace(\n x[0], x[-1], (poses_pair.shape[0] - 1) * (num_intermediate + 1) + 1)\n\n new_frames = 
f1(evenly_spaced_points)\n new_cameras = f2(evenly_spaced_points)\n\n arr = [(new_frames[i], new_cameras[i])\n for i in range(new_frames.shape[0])]\n if 0 < i < len(poses) - 1:\n # remove first frame that was already added in the last interpolation\n arr.pop(0)\n new_poses += arr\n\n return new_poses\n"
] | [
[
"scipy.interpolate.interp1d",
"numpy.arange",
"numpy.expand_dims",
"numpy.array",
"numpy.concatenate",
"numpy.linspace"
]
] |
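A self-contained sketch of the interpolation pattern used by this row's `interpolate_poses`, with toy shapes standing in for SMPL vertex arrays (all values illustrative):

```python
import numpy as np
from scipy import interpolate

pose_1 = np.expand_dims(np.array([0.0, 0.0, 0.0]), axis=0)  # frame i
pose_2 = np.expand_dims(np.array([1.0, 2.0, 3.0]), axis=0)  # frame i + 1
pair = np.concatenate((pose_1, pose_2), axis=0)             # shape (2, 3)

x = np.arange(pair.shape[0])
f = interpolate.interp1d(x, pair, axis=0)                   # linear by default
points = np.linspace(x[0], x[-1], 2 + 3)                    # endpoints + 3 intermediates
frames = f(points)                                          # shape (5, 3)
```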
caditi97/exatrkx-ctd2020 | [
"ed090ddfcc9e2e623fb45000fca71d5ad6ccf3b9"
] | [
"GraphLearning/src/distributed/torch.py"
] | [
"\"\"\"Utility code for running native pytorch distributed\"\"\"\n\nimport os\n\nimport torch.distributed as dist\n\ndef init_workers_file():\n rank = int(os.environ['SLURM_PROCID'])\n n_ranks = int(os.environ['SLURM_NTASKS'])\n sync_file = 'file:///tmp/%s_%s_pytorch_sync' % (\n os.environ['USER'], os.environ['SLURM_JOB_ID'])\n dist.init_process_group(backend='nccl', world_size=n_ranks, rank=rank,\n init_method=sync_file)\n return rank, n_ranks\n\ndef init_workers_mpi():\n dist.init_process_group(backend='mpi')\n rank = dist.get_rank()\n n_ranks = dist.get_world_size()\n return rank, n_ranks\n"
] | [
[
"torch.distributed.init_process_group",
"torch.distributed.get_world_size",
"torch.distributed.get_rank"
]
] |
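A hypothetical single-process sketch of the three torch.distributed calls tagged above; the gloo backend and the address/port values are assumptions chosen so the snippet runs outside a SLURM job:

```python
import os
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")  # placeholder rendezvous address
os.environ.setdefault("MASTER_PORT", "29500")      # placeholder port
dist.init_process_group(backend="gloo", world_size=1, rank=0)
print(dist.get_rank(), dist.get_world_size())      # -> 0 1
dist.destroy_process_group()
```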
ishandutta2007/incubator-mxnet | [
"54a3c58c49fdfac595a348301b6f0701db09d4ab"
] | [
"tests/python/unittest/test_io.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: skip-file\nimport mxnet as mx\nfrom mxnet.test_utils import *\nimport numpy as np\nimport os, gzip\nimport pickle as pickle\nimport time\ntry:\n import h5py\nexcept ImportError:\n h5py = None\nimport sys\nfrom common import get_data\nimport unittest\n\n\ndef test_MNISTIter():\n # prepare data\n get_data.GetMNIST_ubyte()\n\n batch_size = 100\n train_dataiter = mx.io.MNISTIter(\n image=\"data/train-images-idx3-ubyte\",\n label=\"data/train-labels-idx1-ubyte\",\n data_shape=(784,),\n batch_size=batch_size, shuffle=1, flat=1, silent=0, seed=10)\n # test_loop\n nbatch = 60000 / batch_size\n batch_count = 0\n for batch in train_dataiter:\n batch_count += 1\n assert(nbatch == batch_count)\n # test_reset\n train_dataiter.reset()\n train_dataiter.iter_next()\n label_0 = train_dataiter.getlabel().asnumpy().flatten()\n train_dataiter.iter_next()\n train_dataiter.iter_next()\n train_dataiter.iter_next()\n train_dataiter.iter_next()\n train_dataiter.reset()\n train_dataiter.iter_next()\n label_1 = train_dataiter.getlabel().asnumpy().flatten()\n assert(sum(label_0 - label_1) == 0)\n\ndef test_Cifar10Rec():\n get_data.GetCifar10()\n dataiter = mx.io.ImageRecordIter(\n path_imgrec=\"data/cifar/train.rec\",\n mean_img=\"data/cifar/cifar10_mean.bin\",\n rand_crop=False,\n and_mirror=False,\n shuffle=False,\n data_shape=(3,28,28),\n batch_size=100,\n preprocess_threads=4,\n prefetch_buffer=1)\n labelcount = [0 for i in range(10)]\n batchcount = 0\n for batch in dataiter:\n npdata = batch.data[0].asnumpy().flatten().sum()\n sys.stdout.flush()\n batchcount += 1\n nplabel = batch.label[0].asnumpy()\n for i in range(nplabel.shape[0]):\n labelcount[int(nplabel[i])] += 1\n for i in range(10):\n assert(labelcount[i] == 5000)\n\ndef test_NDArrayIter():\n data = np.ones([1000, 2, 2])\n label = np.ones([1000, 1])\n for i in range(1000):\n data[i] = i / 100\n label[i] = i / 100\n dataiter = mx.io.NDArrayIter(data, label, 128, True, last_batch_handle='pad')\n batchidx = 0\n for batch in dataiter:\n batchidx += 1\n assert(batchidx == 8)\n dataiter = mx.io.NDArrayIter(data, label, 128, False, last_batch_handle='pad')\n batchidx = 0\n labelcount = [0 for i in range(10)]\n for batch in dataiter:\n label = batch.label[0].asnumpy().flatten()\n assert((batch.data[0].asnumpy()[:,0,0] == label).all())\n for i in range(label.shape[0]):\n labelcount[int(label[i])] += 1\n\n for i in range(10):\n if i == 0:\n assert(labelcount[i] == 124)\n else:\n assert(labelcount[i] == 100)\n\ndef test_NDArrayIter_h5py():\n if not h5py:\n return\n\n data = np.ones([1000, 2, 2])\n label = np.ones([1000, 1])\n for i in range(1000):\n data[i] = i / 100\n label[i] = i / 100\n\n try:\n os.remove(\"ndarraytest.h5\")\n except OSError:\n pass\n with 
h5py.File(\"ndarraytest.h5\") as f:\n f.create_dataset(\"data\", data=data)\n f.create_dataset(\"label\", data=label)\n\n dataiter = mx.io.NDArrayIter(f[\"data\"], f[\"label\"], 128, True, last_batch_handle='pad')\n batchidx = 0\n for batch in dataiter:\n batchidx += 1\n assert(batchidx == 8)\n\n dataiter = mx.io.NDArrayIter(f[\"data\"], f[\"label\"], 128, False, last_batch_handle='pad')\n labelcount = [0 for i in range(10)]\n for batch in dataiter:\n label = batch.label[0].asnumpy().flatten()\n assert((batch.data[0].asnumpy()[:,0,0] == label).all())\n for i in range(label.shape[0]):\n labelcount[int(label[i])] += 1\n\n try:\n os.remove(\"ndarraytest.h5\")\n except OSError:\n pass\n\n for i in range(10):\n if i == 0:\n assert(labelcount[i] == 124)\n else:\n assert(labelcount[i] == 100)\n\ndef test_NDArrayIter_csr():\n # creating toy data\n num_rows = rnd.randint(5, 15)\n num_cols = rnd.randint(1, 20)\n batch_size = rnd.randint(1, num_rows)\n shape = (num_rows, num_cols)\n csr, _ = rand_sparse_ndarray(shape, 'csr')\n dns = csr.asnumpy()\n #test CSRNDArray with shuffle=True will throw NotImplementedError \n try:\n csr_iter = mx.io.NDArrayIter({'data': csr}, dns, batch_size, shuffle=True,\n last_batch_handle='discard')\n assert(False)\n except NotImplementedError:\n pass\n\n # make iterators\n csr_iter = iter(mx.io.NDArrayIter(csr, csr, batch_size, last_batch_handle='discard'))\n begin = 0\n for batch in csr_iter:\n expected = np.zeros((batch_size, num_cols))\n end = begin + batch_size\n expected[:num_rows - begin] = dns[begin:end]\n if end > num_rows:\n expected[num_rows - begin:] = dns[0:end - num_rows]\n assert_almost_equal(batch.data[0].asnumpy(), expected)\n begin += batch_size\n\ndef test_LibSVMIter():\n\n def check_libSVMIter_synthetic():\n cwd = os.getcwd()\n data_path = os.path.join(cwd, 'data.t')\n label_path = os.path.join(cwd, 'label.t')\n with open(data_path, 'w') as fout:\n fout.write('1.0 0:0.5 2:1.2\\n')\n fout.write('-2.0\\n')\n fout.write('-3.0 0:0.6 1:2.4 2:1.2\\n')\n fout.write('4 2:-1.2\\n')\n\n with open(label_path, 'w') as fout:\n fout.write('1.0\\n')\n fout.write('-2.0 0:0.125\\n')\n fout.write('-3.0 2:1.2\\n')\n fout.write('4 1:1.0 2:-1.2\\n')\n\n data_dir = os.path.join(cwd, 'data')\n data_train = mx.io.LibSVMIter(data_libsvm=data_path, label_libsvm=label_path,\n data_shape=(3, ), label_shape=(3, ), batch_size=3)\n\n first = mx.nd.array([[ 0.5, 0., 1.2], [ 0., 0., 0.], [ 0.6, 2.4, 1.2]])\n second = mx.nd.array([[ 0., 0., -1.2], [ 0.5, 0., 1.2], [ 0., 0., 0.]])\n i = 0\n for batch in iter(data_train):\n expected = first.asnumpy() if i == 0 else second.asnumpy()\n assert_almost_equal(data_train.getdata().asnumpy(), expected)\n i += 1\n\n def check_libSVMIter_news_data():\n news_metadata = {\n 'name': 'news20.t',\n 'origin_name': 'news20.t.bz2',\n 'url': \"http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/news20.t.bz2\",\n 'feature_dim': 62060,\n 'num_classes': 20,\n 'num_examples': 3993,\n }\n batch_size = 33\n num_examples = news_metadata['num_examples']\n data_dir = os.path.join(os.getcwd(), 'data')\n get_bz2_data(data_dir, news_metadata['name'], news_metadata['url'],\n news_metadata['origin_name'])\n path = os.path.join(data_dir, news_metadata['name'])\n data_train = mx.io.LibSVMIter(data_libsvm=path, data_shape=(news_metadata['feature_dim'],),\n batch_size=batch_size)\n for epoch in range(2):\n num_batches = 0\n for batch in data_train:\n # check the range of labels\n assert(np.sum(batch.label[0].asnumpy() > 20) == 0)\n 
assert(np.sum(batch.label[0].asnumpy() <= 0) == 0)\n num_batches += 1\n expected_num_batches = num_examples / batch_size\n assert(num_batches == int(expected_num_batches)), num_batches\n data_train.reset()\n\n check_libSVMIter_synthetic()\n check_libSVMIter_news_data()\n \[email protected](\"test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/7826\")\ndef test_CSVIter():\n def check_CSVIter_synthetic():\n cwd = os.getcwd()\n data_path = os.path.join(cwd, 'data.t')\n label_path = os.path.join(cwd, 'label.t')\n with open(data_path, 'w') as fout:\n for i in range(1000):\n fout.write(','.join(['1' for _ in range(8*8)]) + '\\n')\n with open(label_path, 'w') as fout:\n for i in range(1000):\n fout.write('0\\n')\n\n data_train = mx.io.CSVIter(data_csv=data_path, data_shape=(8,8),\n label_csv=label_path, batch_size=100)\n expected = mx.nd.ones((100, 8, 8))\n for batch in iter(data_train):\n assert_almost_equal(data_train.getdata().asnumpy(), expected.asnumpy())\n\n check_CSVIter_synthetic()\n\nif __name__ == \"__main__\":\n test_NDArrayIter()\n if h5py:\n test_NDArrayIter_h5py()\n test_MNISTIter()\n test_Cifar10Rec()\n test_LibSVMIter()\n test_NDArrayIter_csr()\n test_CSVIter()\n"
] | [
[
"numpy.ones",
"numpy.zeros"
]
] |
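For reference, the two numpy constructors tagged for this row, with shapes echoing the toy data in the tests (illustrative only):

```python
import numpy as np

data = np.ones([1000, 2, 2])   # dense toy features, as in test_NDArrayIter
label = np.ones([1000, 1])     # matching labels
expected = np.zeros((3, 5))    # zero-filled batch buffer, as in test_NDArrayIter_csr
```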
flaviofontes29/Machine-Learning-e-Data-Science-com-Python | [
"7b8188b6e7003426ae3a6d46d91d61494135a2b7"
] | [
"Secao 3 - Pre-processamento com Pandas e scikit-learm/template_credit_data.py"
] | [
"import pandas as pd\nimport numpy as np\n\nbase = pd.read_csv('credit_data.csv')\nbase.loc[base.age < 0, 'age'] = 40.92\n \nprevisores = base.iloc[:, 1:4].values\nclasse = base.iloc[:, 4].values\n\nfrom sklearn.impute import SimpleImputer\nimputer = SimpleImputer(missing_values = np.nan, strategy = 'mean')\nimputer = imputer.fit(previsores[:, 1:4])\nprevisores[:, 1:4] = imputer.transform(previsores[:, 1:4])\n\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nprevisores = scaler.fit_transform(previsores)\n\nfrom sklearn.model_selection import train_test_split\nprevisores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores, classe, test_size=0.25, random_state=0)\n"
] | [
[
"pandas.read_csv",
"sklearn.impute.SimpleImputer",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.StandardScaler"
]
] |
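A runnable, hypothetical version of the same preprocessing pipeline on inline data, so it executes without credit_data.csv (the column names and values are invented):

```python
from io import StringIO
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

csv = StringIO("id,income,age,loan,default\n"
               "1,10000,25,1000,0\n2,20000,-3,2000,1\n"
               "3,30000,,3000,0\n4,40000,40,4000,1\n")
base = pd.read_csv(csv)
base.loc[base.age < 0, "age"] = base.age[base.age > 0].mean()  # same negative-age fix

previsores = base.iloc[:, 1:4].values
classe = base.iloc[:, 4].values
previsores = SimpleImputer(missing_values=np.nan, strategy="mean").fit_transform(previsores)
previsores = StandardScaler().fit_transform(previsores)
X_tr, X_te, y_tr, y_te = train_test_split(previsores, classe, test_size=0.25, random_state=0)
```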
tomvdon/lidar-bonnetal | [
"0bb78eb9a731e98e6f3b893d735b6c3ca96cb0e8"
] | [
"train_test_split.py"
] | [
"import shutil\nimport os\nimport glob\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport shutil\nimport re\n\nclouds = glob.glob('range_images/point_cloud_*.pcd')\ntrain, test = train_test_split(clouds, test_size=0.20, random_state=42)\nfor file in train:\n shutil.copy(file, \"simulated_data/sequences/00\")\n number = re.findall(r'[0-9]+', file)[0]\n label = os.path.join(os.path.sep.join(file.split(os.sep)[:-1]), \"labels\",\n \"label_\" + number + \".npy\")\n shutil.copy(label, \"simulated_data/sequences/00/labels\")\nfor file in test:\n shutil.copy(file, \"simulated_data/sequences/01\")\n number = re.findall(r'[0-9]+', file)[0]\n label = os.path.join(os.path.sep.join(file.split(os.sep)[:-1]), \"labels\",\n \"label_\" + number + \".npy\")\n shutil.copy(label, \"simulated_data/sequences/01/labels\")\n"
] | [
[
"sklearn.model_selection.train_test_split"
]
] |
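The split step of this row in isolation, with invented file names standing in for the glob results (illustrative):

```python
from sklearn.model_selection import train_test_split

clouds = [f"range_images/point_cloud_{i}.pcd" for i in range(10)]  # stand-in for glob
train, test = train_test_split(clouds, test_size=0.20, random_state=42)
print(len(train), len(test))  # -> 8 2
```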
arunraja-hub/transformers | [
"76cadb7943c8492ec481f4f3925e9e8793a32c9d"
] | [
"tests/test_modeling_flax_vit.py"
] | [
"# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport unittest\n\nimport numpy as np\n\nfrom transformers import ViTConfig, is_flax_available\nfrom transformers.testing_utils import require_flax, slow\n\nfrom .test_configuration_common import ConfigTester\nfrom .test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor\n\n\nif is_flax_available():\n\n import jax\n from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel\n\n\nclass FlaxViTModelTester(unittest.TestCase):\n def __init__(\n self,\n parent,\n batch_size=13,\n image_size=30,\n patch_size=2,\n num_channels=3,\n is_training=True,\n use_labels=True,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n intermediate_size=37,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n type_sequence_label_size=10,\n initializer_range=0.02,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.image_size = image_size\n self.patch_size = patch_size\n self.num_channels = num_channels\n self.is_training = is_training\n self.use_labels = use_labels\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.type_sequence_label_size = type_sequence_label_size\n self.initializer_range = initializer_range\n\n def prepare_config_and_inputs(self):\n pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])\n\n config = ViTConfig(\n image_size=self.image_size,\n patch_size=self.patch_size,\n num_channels=self.num_channels,\n hidden_size=self.hidden_size,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n hidden_act=self.hidden_act,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n is_decoder=False,\n initializer_range=self.initializer_range,\n )\n\n return config, pixel_values\n\n def create_and_check_model(self, config, pixel_values, labels):\n\n model = FlaxViTModel(config=config)\n result = model(pixel_values)\n # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)\n image_size = (self.image_size, self.image_size)\n patch_size = (self.patch_size, self.patch_size)\n num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (\n config,\n pixel_values,\n ) = config_and_inputs\n inputs_dict = {\"pixel_values\": pixel_values}\n return config, 
inputs_dict\n\n\n@require_flax\nclass FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):\n\n all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()\n\n def setUp(self) -> None:\n self.model_tester = FlaxViTModelTester(self)\n self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n # We need to override this test because in ViT, the seq_len equals the number of patches + 1\n # we compute that here\n def test_attention_outputs(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.return_dict = True\n\n num_patches = (config.image_size // config.patch_size) ** 2\n seq_length = num_patches + 1\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_attentions\"] = True\n inputs_dict[\"output_hidden_states\"] = False\n model = model_class(config)\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n attentions = outputs.attentions\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n\n # check that output_attentions also work using config\n del inputs_dict[\"output_attentions\"]\n config.output_attentions = True\n model = model_class(config)\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n attentions = outputs.attentions\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n\n self.assertListEqual(\n list(attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, seq_length, seq_length],\n )\n out_len = len(outputs)\n\n # Check attention is always last and order is fine\n inputs_dict[\"output_attentions\"] = True\n inputs_dict[\"output_hidden_states\"] = True\n model = model_class(config)\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n\n added_hidden_states = 1\n self.assertEqual(out_len + added_hidden_states, len(outputs))\n\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n self.assertListEqual(\n list(attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, seq_length, seq_length],\n )\n\n # We neeed to override this test because ViT's forward signature is different than text models.\n def test_forward_signature(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n signature = inspect.signature(model.__call__)\n # signature.parameters is an OrderedDict => so arg_names order is deterministic\n arg_names = [*signature.parameters.keys()]\n\n expected_arg_names = [\"pixel_values\"]\n self.assertListEqual(arg_names[:1], expected_arg_names)\n\n # We neeed to override this test because ViT expects pixel_values instead of input_ids\n @slow\n def test_jit_compilation(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n with self.subTest(model_class.__name__):\n prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)\n model = model_class(config)\n\n @jax.jit\n def model_jitted(pixel_values, **kwargs):\n return model(pixel_values=pixel_values, **kwargs)\n\n with self.subTest(\"JIT Enabled\"):\n jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()\n\n with self.subTest(\"JIT Disabled\"):\n with jax.disable_jit():\n outputs = model_jitted(**prepared_inputs_dict).to_tuple()\n\n self.assertEqual(len(outputs), 
len(jitted_outputs))\n for jitted_output, output in zip(jitted_outputs, outputs):\n self.assertEqual(jitted_output.shape, output.shape)\n\n # We need to override this test because in ViT, the seq_len equals the number of patches + 1\n # we compute that here\n def test_hidden_states_output(self):\n def check_hidden_states_output(inputs_dict, config, model_class):\n model = model_class(config)\n num_patches = (config.image_size // config.patch_size) ** 2\n seq_length = num_patches + 1 # we add 1 for the [CLS] token\n\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n hidden_states = outputs.hidden_states\n\n self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)\n\n self.assertListEqual(\n list(hidden_states[0].shape[-2:]),\n [seq_length, self.model_tester.hidden_size],\n )\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_hidden_states\"] = True\n check_hidden_states_output(inputs_dict, config, model_class)\n\n # check that output_hidden_states also work using config\n del inputs_dict[\"output_hidden_states\"]\n config.output_hidden_states = True\n\n check_hidden_states_output(inputs_dict, config, model_class)\n\n @slow\n def test_model_from_pretrained(self):\n for model_class_name in self.all_model_classes:\n model = model_class_name.from_pretrained(\"google/vit-base-patch16-224\")\n outputs = model(np.ones((1, 3, 224, 224)))\n self.assertIsNotNone(outputs)\n"
] | [
[
"numpy.ones"
]
] |
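The single numpy API tagged for this row builds the dummy input batch for the pretrained-model smoke test; a standalone equivalent (shape taken from the test):

```python
import numpy as np

pixel_values = np.ones((1, 3, 224, 224))  # one 3-channel 224x224 image, NCHW layout
```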
yamamon75/PmagPy | [
"fa5b189800a239683fc17c6b312cdfdd839a46c3"
] | [
"pmagpy/controlled_vocabularies2.py"
] | [
"#!/usr/bin/env python\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport json\nimport os\nfrom builtins import object\n\nimport pandas as pd\nfrom pandas import Series\nfrom . import find_pmag_dir\n\npmag_dir = find_pmag_dir.get_pmag_dir()\ndata_model_dir = os.path.join(pmag_dir, 'pmagpy', 'data_model')\n# if using with py2app, the directory structure is flat,\n# so check to see where the resource actually is\nif not os.path.exists(data_model_dir):\n data_model_dir = os.path.join(pmag_dir, 'data_model')\n\n\nclass Vocabulary(object):\n\n def __init__(self):\n self.vocabularies = []\n self.possible_vocabularies = []\n self.all_codes = []\n self.code_types = []\n self.er_methods = []\n self.pmag_methods = []\n self.age_methods = []\n\n def get_one_meth_type(self, mtype, method_list):\n \"\"\"\n Get all codes of one type (i.e., 'anisotropy_estimation')\n \"\"\"\n cond = method_list['dtype'] == mtype\n codes = method_list[cond]\n return codes\n\n def get_one_meth_category(self, category, all_codes, code_types):\n \"\"\"\n Get all codes in one category (i.e., all pmag codes).\n This can include multiple method types (i.e., 'anisotropy_estimation', 'sample_prepartion', etc.)\n \"\"\"\n categories = Series(code_types[code_types[category] == True].index)\n cond = all_codes['dtype'].isin(categories)\n codes = all_codes[cond]\n return codes\n\n def get_tiered_meth_category_offline(self, category):\n path = os.path.join(data_model_dir, '{}_methods.txt'.format(category))\n dfile = open(path)\n json_data = json.load(dfile)\n dfile.close()\n return json_data\n\n def get_meth_codes(self):\n print('-I- Getting cached method codes for 2.5')\n er_methods = self.get_tiered_meth_category_offline('er')\n pmag_methods = self.get_tiered_meth_category_offline('pmag')\n age_methods = self.get_tiered_meth_category_offline('age')\n path = os.path.join(data_model_dir, 'code_types.txt')\n with open(path, 'r') as type_file:\n raw_code_types = json.load(type_file)\n code_types = pd.read_json(raw_code_types)\n path = os.path.join(data_model_dir, 'all_codes.txt')\n with open(path, 'r') as code_file:\n raw_all_codes = json.load(code_file)\n all_codes = pd.read_json(raw_all_codes)\n self.er_methods = er_methods\n self.pmag_methods = pmag_methods\n self.age_methods = age_methods\n self.all_codes = all_codes\n self.code_types = code_types\n\n def get_vocabularies(self):\n print('-I- Getting cached controlled vocabularies for 2.5')\n ## skip trying to get method codes etc. dynamically.\n ## 2.5 method codes etc. are no longer available on earthref\n #all_codes, code_types = self.get_meth_codes()\n #if any(all_codes):\n # er_methods = self.get_tiered_meth_category('er', all_codes, code_types)\n # pmag_methods = self.get_tiered_meth_category('pmag', all_codes, code_types)\n # age_methods = self.get_tiered_meth_category('age', all_codes, code_types)\n #else:\n #\n # method codes\n\n # controlled vocabularies\n path = os.path.join(data_model_dir, 'controlled_vocabularies2.json')\n with open(path, 'r') as code_file:\n raw_vocabularies = json.load(code_file)\n vocabularies = dict([(k, v) for k, v in raw_vocabularies.items()])\n self.vocabularies = vocabularies\n self.possible_vocabularies = vocabularies\n\n def get_all_vocabulary(self):\n self.get_vocabularies()\n self.get_meth_codes()\n\n\nvocab = Vocabulary()\n"
] | [
[
"pandas.Series",
"pandas.read_json"
]
] |
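A toy sketch of the two pandas APIs tagged above; the JSON payload is invented, and StringIO is used because passing a literal JSON string to read_json is deprecated in recent pandas:

```python
from io import StringIO
import pandas as pd

categories = pd.Series(["anisotropy_estimation", "sample_preparation"])
raw = '{"code": {"0": "GM-ARAR", "1": "GM-UPB"}}'  # invented stand-in for code_types.txt
code_types = pd.read_json(StringIO(raw))
```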
halotudio/openPNM-copy2 | [
"d400ec65e9421256a531f6d22a38255b002d5dcb"
] | [
"openpnm/io/CSV.py"
] | [
"import re\nimport numpy as np\nfrom openpnm.io.Pandas import Pandas\nfrom openpnm.io import GenericIO, Dict\nfrom openpnm.utils import logging, Workspace\nlogger = logging.getLogger(__name__)\nws = Workspace()\n\n\nclass CSV(GenericIO):\n r\"\"\"\n Reads and writes CSV (comma-separated-value files) containing pore and\n throat data\n\n Notes\n -----\n There are a few rules governing how the data is be stored:\n\n 1. The first row of the file (column headers) must contain the\n property names. The subsequent rows contain the data.\n\n 2. The property names should be in the usual OpenPNM format, such as\n of ``pore.volume`` or ``throat.surface_area``.\n\n 3. Each column represents a specific property. For Np x 1 or Nt x 1\n data such as *pore.volume* this is straightforward. For Np x *m* or\n Nt x *m* data, each of the *m* columns should have their own column in\n in the CSV file, with a numpy-style index indicating which axis it\n corresponds to. For instance, the *pore.coords* values should be stored\n as three separate columns with the headings: *pore.coords[0]*,\n *pore.coords[1]*, and *pore.coords[2]*. OpenPNM will convert that back\n into an Np x *m* array upon loading.\n\n 4. The file can contain both or either pore and throat data.\n\n 5. Labels can be imported by placing the characters TRUE and FALSE\n in a column corresponding to the label name (i.e. *pore.front*). TRUE\n indicates where the label applies and FALSE otherwise.\n\n \"\"\"\n\n @classmethod\n def save(cls, *args, **kwargs):\n r\"\"\"\n This method is to be deprecated. Use ``export_data`` instead.\n \"\"\"\n cls.export_data(*args, **kwargs)\n\n @classmethod\n def export_data(cls, network=None, phases=[], filename='', delim=' | '):\n r\"\"\"\n Save all the pore and throat property data on the Network (and\n optionally on any Phases objects) to CSV files.\n\n Parameters\n ----------\n network : OpenPNM Network\n The Network containing the data to be stored\n\n phases : list of OpenPNM Phases (optional)\n The Phases whose data should be stored.\n\n filename : string or path object\n The name of the file to store the data\n\n Notes\n -----\n The data from all Geometry objects is added to the file automatically.\n\n \"\"\"\n project, network, phases = cls._parse_args(network=network,\n phases=phases)\n df = Pandas.to_dataframe(network=network, phases=phases,\n join=True, delim=delim)\n # Write to file\n if filename == '':\n filename = project.name\n fname = cls._parse_filename(filename=filename, ext='csv')\n df.to_csv(fname, index=False)\n\n @classmethod\n def load(cls, *args, **kwargs):\n r\"\"\"\n This method will be deprecated. Use ``import_data`` instead.\n \"\"\"\n proj = cls.import_data(*args, **kwargs)\n return proj\n\n @classmethod\n def import_data(cls, filename, project=None, delim=' | '):\n r\"\"\"\n Opens a 'csv' file, reads in the data, and adds it to the **Network**\n\n Parameters\n ----------\n filename : string (optional)\n The name of the file containing the data to import. 
The formatting\n of this file is outlined below.\n\n project : OpenPNM Project object\n A GenericNetwork is created and added to the specified Project.\n If no Project object is supplied then one will be created and\n returned.\n\n Returns\n -------\n project : list\n An OpenPNM project containing the data assigned to Generic\n versions of the objects from which it was exported.\n\n \"\"\"\n from pandas import read_table\n\n if project is None:\n project = ws.new_project()\n\n fname = cls._parse_filename(filename, ext='csv')\n a = read_table(filepath_or_buffer=fname,\n sep=',',\n skipinitialspace=True,\n index_col=False,\n true_values=['T', 't', 'True', 'true', 'TRUE'],\n false_values=['F', 'f', 'False', 'false', 'FALSE'])\n\n dct = {}\n # First parse through all the items and re-merge columns\n keys = sorted(list(a.keys()))\n for item in keys:\n m = re.search(r'\\[.\\]', item) # The dot '.' is a wildcard\n if m: # m is None if pattern not found, otherwise merge cols\n pname = re.split(r'\\[.\\]', item)[0] # Get base propname\n # Find all other keys with same base propname\n merge_keys = [k for k in a.keys() if k.startswith(pname)]\n # Rerieve and remove arrays with same base propname\n merge_cols = [a.pop(k) for k in merge_keys]\n # Merge arrays into multi-column array and store in DataFrame\n dct[pname] = np.vstack(merge_cols).T\n # Remove key from list of keys\n for k in keys:\n if k.startswith(pname):\n keys.pop(keys.index(k))\n else:\n dct[item] = np.array(a.pop(item))\n\n project = Dict.from_dict(dct, project=project, delim=delim)\n\n return project\n"
] | [
[
"pandas.read_table",
"numpy.vstack"
]
] |
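A self-contained sketch of the column re-merging idea from this row's `CSV.import_data`, on an in-memory table (the header names follow the docstring's pore.coords example):

```python
from io import StringIO
import numpy as np
from pandas import read_table

buf = StringIO("pore.coords[0],pore.coords[1],pore.coords[2]\n"
               "0.0,0.5,1.0\n"
               "1.0,1.5,2.0\n")
a = read_table(buf, sep=",", skipinitialspace=True, index_col=False)
merge_cols = [a.pop(k) for k in list(a.keys())]  # three Np x 1 columns
coords = np.vstack(merge_cols).T                 # back to one Np x 3 array
```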
inestm28/si | [
"a82ba37bd628c5ebdc723f5e1a9894832c8f1a76"
] | [
"src/si/util/cv.py"
] | [
"from .util import train_test_split\nimport numpy as np\nimport itertools\n\n# MODEL SELECTION\n\nclass Cross_Validation:\n #avaliar a performance de um modelo\n def __init__(self, model, dataset,score=None, **kwargs):\n self.model=model #modelo que se quer avaliar\n self.dataset=dataset\n self.cv=kwargs.get('cv',3) #.get returns 3. number of folds (K-fold)\n self.split=kwargs.get('split', 0.8)\n self.train_scores=None\n self.test_scores=None\n self.ds=None\n self.score=score\n\n def run(self):\n train_scores = []\n test_scores = []\n ds=[] #lista com tuplos de conjuntos de treino e de teste\n for _ in range(self.cv): # 3 folds. underscore pq não vamos precisar do valor da variável\n train, test = train_test_split(self.dataset, self.split)\n ds.append((train, test))\n self.model.fit(train)\n if not self.score: #if self.score diferente de None então corre o ciclo\n train_scores.append(self.model.cost()) #cost -> dá a medida de quão longe o valor previsto está do output original\n test_scores.append(self.model.cost(test.X, test.y))\n else: #if self.score = None\n y_train=np.ma.apply_along_axis(self.model.predict, axis=0, arr=train.X.T)\n train_scores.append(self.score(train.y, y_train))\n y_test=np.ma.apply_along_axis(self.model.predict, axis=0, arr=test.X.T)\n test_scores.append(self.score(test.y, y_test))\n self.train_scores=train_scores\n self.test_scores=test_scores\n self.ds=ds\n return train_scores, test_scores #accuracies de cada fold\n\n def toDataframe(self):\n import pandas as pd\n assert self.train_scores and self.test_scores, 'Need to run code first'\n return pd.DataFrame({'Train Scores': self.train_scores, 'Test scores': self.test_scores})\n\nclass Grid_Search:\n #automatically selecting the best hyper parameteres for a particular model\n def __init__(self, model, dataset, parameters, **kwargs):\n self.model=model #modelo a ser avaliado\n self.dataset=dataset\n hasparam=[hasattr(self.model, param) for param in parameters] #hasattr() returns true if an object has the given named attribute, hasattr(object, name of attribute)\n if np.all(hasparam): #Test whether all array elements along a given axis evaluate to True.\n self.parameters=parameters #dictionary of all the parameters and their corresponding list of values that you want to test for best performance\n else:\n index=hasparam.index(False)\n keys=list(parameters.keys())\n raise ValueError(f\"wrong parameters: {keys[index]}\")\n self.kwargs=kwargs\n self.results=None\n\n def run(self):\n self.results=[]\n attrs=list(self.parameters.keys()) #nome dos parametros\n values=list(self.parameters.values()) #valores dos parametros\n for conf in itertools.product(*values): #itertools.product -> cartesian product of all the iterable provided as the argument.\n for i in range(len(attrs)):\n setattr(self.model, attrs[i], conf[i])\n scores=Cross_Validation(self.model, self.dataset, **self.kwargs).run() #faz CROSS VALIDATION\n self.results.append((conf, scores)) #para cada valor de parametro, dá as accuracies do modelo\n return self.results\n\n def toDataframe(self):\n import pandas as pd\n assert self.results, 'The grid search needs to be ran.'\n data=dict()\n for i, k in enumerate(self.parameters.keys()):\n v=[]\n for r in self.results:\n v.append(r[0][i])\n data[k]=v\n for i in range(len(self.results[0][1][0])):\n treino, teste = [], []\n for r in self.results:\n treino.append(r[1][0][i])\n teste.append(r[1][1][i])\n data['Train ' + str(i + 1)] = treino\n data['Test ' + str(i + 1)] = teste\n return pd.DataFrame(data)"
] | [
[
"pandas.DataFrame",
"numpy.ma.apply_along_axis",
"numpy.all"
]
] |
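A tiny, hypothetical stand-in for the cross-validation scoring path above, exercising the three APIs tagged for this row (the predictor is a toy lambda, not the real model):

```python
import numpy as np
import pandas as pd

X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])         # rows are samples
predict = lambda sample: sample.sum()                      # stand-in for model.predict
y_pred = np.ma.apply_along_axis(predict, axis=0, arr=X.T)  # one value per sample
assert np.all(y_pred == X.sum(axis=1))
scores = pd.DataFrame({"Train Scores": [0.91], "Test scores": [0.88]})
```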
EmadAlamoudi/libpetab-python-MS | [
"7d21d79e9c02200d361a19c737d61c0e56123ca0"
] | [
"tests/test_visualization.py"
] | [
"import warnings\nfrom os import path\nfrom tempfile import TemporaryDirectory\nimport pytest\nfrom petab.C import *\nfrom petab.visualize import (plot_data_and_simulation,\n plot_measurements_by_observable,\n save_vis_spec)\nimport matplotlib.pyplot as plt\n\n\[email protected]\ndef data_file_Fujita():\n return \"doc/example/example_Fujita/Fujita_measurementData.tsv\"\n\n\[email protected]\ndef condition_file_Fujita():\n return \"doc/example/example_Fujita/Fujita_experimentalCondition.tsv\"\n\n\[email protected]\ndef data_file_Fujita_wrongNoise():\n return \"doc/example/example_Fujita/Fujita_measurementData_wrongNoise.tsv\"\n\n\[email protected]\ndef data_file_Fujita_nanData():\n return \"doc/example/example_Fujita/Fujita_measurementData_nanData.tsv\"\n\n\[email protected]\ndef simu_file_Fujita():\n return \"doc/example/example_Fujita/Fujita_simulatedData.tsv\"\n\n\[email protected]\ndef data_file_Fujita_minimal():\n return \"doc/example/example_Fujita/Fujita_measurementData_minimal.tsv\"\n\n\[email protected]\ndef visu_file_Fujita_small():\n return \"doc/example/example_Fujita/Fujita_visuSpec_small.tsv\"\n\n\[email protected]\ndef visu_file_Fujita_wo_dsid():\n return \"doc/example/example_Fujita/visuSpecs/Fujita_visuSpec_1.tsv\"\n\n\[email protected]\ndef visu_file_Fujita_minimal():\n return \"doc/example/example_Fujita/visuSpecs/Fujita_visuSpec_mandatory.tsv\"\n\n\[email protected]\ndef visu_file_Fujita_empty():\n return \"doc/example/example_Fujita/visuSpecs/Fujita_visuSpec_empty.tsv\"\n\n\[email protected]\ndef data_file_Isensee():\n return \"doc/example/example_Isensee/Isensee_measurementData.tsv\"\n\n\[email protected]\ndef condition_file_Isensee():\n return \"doc/example/example_Isensee/Isensee_experimentalCondition.tsv\"\n\n\[email protected]\ndef vis_spec_file_Isensee():\n return \"doc/example/example_Isensee/Isensee_visualizationSpecification.tsv\"\n\n\[email protected]\ndef simulation_file_Isensee():\n return \"doc/example/example_Isensee/Isensee_simulationData.tsv\"\n\n\ndef test_visualization_with_vis_and_sim(data_file_Isensee,\n condition_file_Isensee,\n vis_spec_file_Isensee,\n simulation_file_Isensee):\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n vis_spec_file_Isensee,\n simulation_file_Isensee)\n\n\ndef test_visualization_with_vis(data_file_Isensee,\n condition_file_Isensee,\n vis_spec_file_Isensee):\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n vis_spec_file_Isensee)\n\n\ndef test_visualization_small_visu_file_w_datasetid(data_file_Fujita,\n condition_file_Fujita,\n visu_file_Fujita_small):\n \"\"\"\n Test: visualization spezification file only with few columns in\n particular datasetId\n (optional columns are optional)\n \"\"\"\n plot_data_and_simulation(data_file_Fujita,\n condition_file_Fujita,\n visu_file_Fujita_small)\n\n\ndef test_visualization_small_visu_file_wo_datasetid(data_file_Fujita,\n condition_file_Fujita,\n visu_file_Fujita_wo_dsid):\n \"\"\"\n Test: visualization spezification file only with few columns in\n particular no datasetId column\n (optional columns are optional)\n \"\"\"\n plot_data_and_simulation(data_file_Fujita,\n condition_file_Fujita,\n visu_file_Fujita_wo_dsid)\n\n\ndef test_visualization_minimal_visu_file(data_file_Fujita,\n condition_file_Fujita,\n visu_file_Fujita_minimal):\n \"\"\"\n Test: visualization spezification file only with mandatory column plotId\n (optional columns are optional)\n \"\"\"\n plot_data_and_simulation(data_file_Fujita,\n condition_file_Fujita,\n 
visu_file_Fujita_minimal)\n\n\ndef test_visualization_empty_visu_file(data_file_Fujita,\n condition_file_Fujita,\n visu_file_Fujita_empty):\n \"\"\"\n Test: Empty visualization spezification file should default to routine\n for no file at all\n \"\"\"\n plot_data_and_simulation(data_file_Fujita,\n condition_file_Fujita,\n visu_file_Fujita_empty)\n\n\ndef test_visualization_minimal_data_file(data_file_Fujita_minimal,\n condition_file_Fujita,\n visu_file_Fujita_small):\n \"\"\"\n Test visualization, with the case: data file only with mandatory columns\n (optional columns are optional)\n \"\"\"\n plot_data_and_simulation(data_file_Fujita_minimal,\n condition_file_Fujita,\n visu_file_Fujita_small)\n\n\ndef test_visualization_with_dataset_list(data_file_Isensee,\n condition_file_Isensee,\n simulation_file_Isensee):\n datasets = [['JI09_150302_Drg345_343_CycNuc__4_ABnOH_and_ctrl',\n 'JI09_150302_Drg345_343_CycNuc__4_ABnOH_and_Fsk'],\n ['JI09_160201_Drg453-452_CycNuc__ctrl',\n 'JI09_160201_Drg453-452_CycNuc__Fsk',\n 'JI09_160201_Drg453-452_CycNuc__Sp8_Br_cAMPS_AM']]\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n dataset_id_list=datasets)\n\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n sim_data=simulation_file_Isensee,\n dataset_id_list=datasets)\n\n\ndef test_visualization_without_datasets(data_file_Fujita,\n condition_file_Fujita,\n simu_file_Fujita):\n sim_cond_num_list = [[0, 1, 2], [0, 2, 3], [0, 3, 4], [0, 4, 5]]\n sim_cond_id_list = [['model1_data1'], ['model1_data2', 'model1_data3'],\n ['model1_data4', 'model1_data5'], ['model1_data6']]\n observable_num_list = [[0], [1], [2], [0, 2], [1, 2]]\n observable_id_list = [['pS6_tot'], ['pEGFR_tot'], ['pAkt_tot']]\n\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n sim_cond_num_list=sim_cond_num_list,\n plotted_noise=PROVIDED)\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n sim_data=simu_file_Fujita,\n sim_cond_num_list=sim_cond_num_list,\n plotted_noise=PROVIDED)\n\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n sim_cond_id_list=sim_cond_id_list)\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n sim_data=simu_file_Fujita,\n sim_cond_id_list=sim_cond_id_list)\n\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n observable_num_list=observable_num_list)\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n sim_data=simu_file_Fujita,\n observable_num_list=observable_num_list)\n\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n observable_id_list=observable_id_list,\n plotted_noise=PROVIDED)\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n sim_data=simu_file_Fujita,\n observable_id_list=observable_id_list,\n plotted_noise=PROVIDED)\n\n\ndef test_visualization_omit_empty_datasets(data_file_Fujita_nanData,\n condition_file_Fujita):\n observable_num_list = [[0, 1]]\n plot_data_and_simulation(data_file_Fujita_nanData, condition_file_Fujita,\n observable_num_list=observable_num_list)\n\n\ndef test_visualization_raises(data_file_Fujita,\n condition_file_Fujita,\n data_file_Fujita_wrongNoise):\n sim_cond_num_list = [[0, 1, 2], [0, 2, 3], [0, 3, 4], [0, 4, 5]]\n sim_cond_id_list = [['model1_data1'], ['model1_data2', 'model1_data3'],\n ['model1_data4', 'model1_data5'], ['model1_data6']]\n observable_num_list = [[0], [1], [2], [0, 2], [1, 2]]\n observable_id_list = [['pS6_tot'], ['pEGFR_tot'], ['pAkt_tot']]\n error_counter = 0\n\n # Combining 
simulation condition numbers and IDs should not be allowed\n try:\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n sim_cond_num_list=sim_cond_num_list,\n sim_cond_id_list=sim_cond_id_list)\n except NotImplementedError as ErrMsg:\n assert(ErrMsg.args[0] == 'Either specify a list of simulation '\n 'condition IDs or a list of simulation '\n 'condition numbers, but not both. '\n 'Stopping.')\n error_counter += 1\n assert (error_counter == 1)\n\n # Combining observable numbers and IDs should not be allowed\n try:\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n observable_num_list=observable_num_list,\n observable_id_list=observable_id_list)\n except NotImplementedError as ErrMsg:\n assert(ErrMsg.args[0] == 'Either specify a list of observable IDs or '\n 'a list of observable numbers, but not both. '\n 'Stopping.')\n error_counter += 1\n assert (error_counter == 2)\n\n # Combining observable and simulation conditions numbers or IDs should not\n # be allowed\n try:\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n sim_cond_num_list=observable_num_list,\n observable_num_list=observable_num_list)\n except NotImplementedError as ErrMsg:\n assert(ErrMsg.args[0] == 'Plotting without visualization specification'\n ' file and datasetId can be performed via '\n 'grouping by simulation conditions OR '\n 'observables, but not both. Stopping.')\n error_counter += 1\n assert (error_counter == 3)\n try:\n plot_data_and_simulation(data_file_Fujita, condition_file_Fujita,\n sim_cond_id_list=observable_id_list,\n observable_id_list=observable_id_list)\n except NotImplementedError as ErrMsg:\n assert(ErrMsg.args[0] == 'Plotting without visualization specification'\n ' file and datasetId can be performed via '\n 'grouping by simulation conditions OR '\n 'observables, but not both. Stopping.')\n error_counter += 1\n assert (error_counter == 4)\n\n # If no numerical noise is provided, it should not work to plot it\n try:\n plot_measurements_by_observable(data_file_Fujita_wrongNoise,\n condition_file_Fujita,\n plotted_noise='provided')\n except NotImplementedError as ErrMsg:\n assert(ErrMsg.args[0] == \"No numerical noise values provided in the \"\n \"measurement table. 
Stopping.\")\n error_counter += 1\n\n assert (error_counter == 5)\n\n\ndef test_visualization_warnings(data_file_Isensee, condition_file_Isensee):\n datasets = [['JI09_150302_Drg345_343_CycNuc__4_ABnOH_and_ctrl',\n 'JI09_150302_Drg345_343_CycNuc__4_ABnOH_and_Fsk'],\n ['JI09_160201_Drg453-452_CycNuc__ctrl',\n 'JI09_160201_Drg453-452_CycNuc__Fsk',\n 'JI09_160201_Drg453-452_CycNuc__Sp8_Br_cAMPS_AM']]\n sim_cond_num_list = [[0, 1, 2], [0, 2, 3], [0, 3, 4], [0, 4, 5]]\n observable_num_list = [[0], [1], [2], [0, 2], [1, 2]]\n\n # close open figures to avoid runtime warnings\n plt.close(\"all\")\n\n with warnings.catch_warnings(record=True) as warnMsg:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n # plotting with datasetIds and sim conditions should issue a warning\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n dataset_id_list=datasets,\n sim_cond_num_list=sim_cond_num_list)\n\n # plotting with datasetIds and observables should issue a warning\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n dataset_id_list=datasets,\n observable_num_list=observable_num_list)\n\n # plotting with datasetIds and observables and sim conditions should\n # issue a warning\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n dataset_id_list=datasets,\n observable_num_list=observable_num_list,\n sim_cond_num_list=sim_cond_num_list)\n\n # plotting grouped by something else than datasetIds should issue a\n # warning if datasetsIDs would have been available\n plot_data_and_simulation(data_file_Isensee,\n condition_file_Isensee,\n sim_cond_num_list=sim_cond_num_list)\n\n # test correct number of warnings\n warnings_list = [msg for msg in warnMsg if\n not issubclass(msg.category, DeprecationWarning)]\n assert len(warnings_list) == 4\n\n # test that all warnings were indeed UserWarnings\n for i_warn in warnings_list:\n assert issubclass(i_warn.category, UserWarning)\n\n\ndef test_simple_visualization(data_file_Fujita, condition_file_Fujita):\n plot_measurements_by_observable(data_file_Fujita, condition_file_Fujita)\n plot_measurements_by_observable(data_file_Fujita, condition_file_Fujita,\n plotted_noise=PROVIDED)\n\n\ndef test_save_plots_to_file(data_file_Isensee, condition_file_Isensee,\n vis_spec_file_Isensee, simulation_file_Isensee):\n with TemporaryDirectory() as temp_dir:\n plot_data_and_simulation(\n data_file_Isensee,\n condition_file_Isensee,\n vis_spec_file_Isensee,\n simulation_file_Isensee,\n subplot_file_path=temp_dir)\n\n\ndef test_save_visu_file(data_file_Isensee,\n condition_file_Isensee):\n\n with TemporaryDirectory() as temp_dir:\n save_vis_spec(data_file_Isensee,\n condition_file_Isensee,\n output_file_path=path.join(temp_dir, \"visuSpec.tsv\"))\n\n datasets = [['JI09_150302_Drg345_343_CycNuc__4_ABnOH_and_ctrl',\n 'JI09_150302_Drg345_343_CycNuc__4_ABnOH_and_Fsk'],\n ['JI09_160201_Drg453-452_CycNuc__ctrl',\n 'JI09_160201_Drg453-452_CycNuc__Fsk',\n 'JI09_160201_Drg453-452_CycNuc__Sp8_Br_cAMPS_AM']]\n\n save_vis_spec(data_file_Isensee,\n condition_file_Isensee,\n dataset_id_list=datasets,\n output_file_path=path.join(temp_dir, \"visuSpec1.tsv\"))\n"
] | [
[
"matplotlib.pyplot.close"
]
] |
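Note on the tests/test_visualization.py row above: it exercises petab.visualize through pytest fixtures that simply return TSV paths, asserts on NotImplementedError messages for conflicting argument combinations, and counts emitted warnings with warnings.catch_warnings(record=True). A minimal, self-contained sketch of that warning-counting pattern follows; noisy_plot is a hypothetical stand-in for plot_data_and_simulation, not part of petab.

import warnings

def noisy_plot(dataset_id_list=None, sim_cond_num_list=None):
    # Hypothetical stand-in: warn when both grouping options are supplied,
    # mirroring the behaviour the petab tests above assert on.
    if dataset_id_list and sim_cond_num_list:
        warnings.warn("dataset IDs take precedence over condition lists", UserWarning)

def test_noisy_plot_warns():
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")  # record every warning, not only the first
        noisy_plot(dataset_id_list=[["d1"]], sim_cond_num_list=[[0, 1]])
    # Filter out unrelated DeprecationWarnings, as the original test does.
    relevant = [w for w in caught if not issubclass(w.category, DeprecationWarning)]
    assert len(relevant) == 1
    assert issubclass(relevant[0].category, UserWarning)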
jeffersonHsieh/tapas | [
"a2f1c8c763c08487bed6b91884dac946dd766ab9"
] | [
"tapas/utils/tf_example_utils.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python3\n\"\"\"Utilities for converting interactions to TF examples.\"\"\"\n\nimport collections\nimport dataclasses\nimport hashlib\nimport random\nfrom typing import Iterable, List, Mapping, Optional, Text, Tuple\n\nfrom absl import logging\nfrom apache_beam import metrics as beam_metrics\nfrom tapas.protos import annotated_text_pb2\nfrom tapas.protos import interaction_pb2\nfrom tapas.protos import table_selection_pb2\nfrom tapas.utils import constants\nfrom tapas.utils import interpretation_utils\nfrom tapas.utils import number_annotation_utils\nfrom tapas.utils import sentence_tokenizer\nfrom tapas.utils import text_index\nfrom tapas.utils import text_utils\nimport tensorflow.compat.v1 as tf\n\nfrom official.nlp.bert import tokenization\n\n_NS = 'main'\n_CLS = '[CLS]'\n_EMPTY = '[EMPTY]'\n_MASK = '[MASK]'\n_SEP = '[SEP]'\n_NAN = float('nan')\n_MAX_NUM_CANDIDATES = 1000\n_MAX_NUM_ROWS = 32\n_WP_PER_CELL = 1.5\n_MAX_INDEX_LENGTH = int(_MAX_NUM_CANDIDATES * _MAX_NUM_ROWS * _WP_PER_CELL)\n_MAX_INT = 2**32 - 1\n\n\[email protected](frozen=True)\nclass Token:\n original_text: Text\n piece: Text\n\n\[email protected](frozen=True)\nclass TrainingInstance:\n tokens: List[Token]\n segment_ids: List[int]\n column_ids: List[int]\n row_ids: List[int]\n masked_lm_positions: List[int]\n masked_lm_labels: List[Text]\n is_random_table: bool\n\n\[email protected](frozen=True)\nclass TokenCoordinates:\n column_index: int\n row_index: int\n token_index: int\n\n\[email protected]\nclass TokenizedTable:\n rows: List[List[List[Token]]]\n selected_tokens: List[TokenCoordinates]\n\n\[email protected](frozen=True)\nclass MaskedLmInstance:\n index: int\n label: Text\n\n\[email protected](frozen=True)\nclass ConversionConfig:\n \"\"\"Configues conversion to TF example.\n\n vocab_file: Bert vocab file\n max_seq_length: Max length of a sequence in word pieces.\n max_column_id: Max column id to extract.\n max_row_id: Max row id to extract.\n \"\"\"\n vocab_file: Text\n max_seq_length: int\n max_column_id: int\n max_row_id: int\n strip_column_names: bool\n\n\[email protected](frozen=True)\nclass PretrainConversionConfig(ConversionConfig):\n \"\"\"Configures options speciic to pretraining data creation.\n\n max_predictions_per_seq: Max predictions per sequence for mask task.\n min_question_length: Min question length.\n max_question_length: Max question length.\n always_continue_cells: If true always mask entire cells.\n strip_column_names: If true, add empty strings instead of column names.\n random_seed: Random seed.\n masked_lm_prob: Percentage of tokens to mask.\n concatenate_snippets: If true concatenate snippets in a random fashion.\n \"\"\"\n max_predictions_per_seq: int\n masked_lm_prob: float\n random_seed: int\n min_question_length: int\n max_question_length: int\n always_continue_cells: bool\n concatenate_snippets: bool = True\n\n\[email protected](frozen=True)\nclass 
TrimmedConversionConfig(ConversionConfig):\n # if > 0: Trim cells so that the length is <= this value.\n # Also disables further cell trimming should thus be used with\n # 'drop_rows_to_fit' below.\n # TODO(thomasmueller) Make this a parameter of the base config.\n # TODO(thomasmueller) Consider giving this a better name.\n cell_trim_length: int = -1\n\n\[email protected](frozen=True)\nclass ClassifierConversionConfig(TrimmedConversionConfig):\n \"\"\"The config used to extract the tf examples for the classifier model.\"\"\"\n add_aggregation_candidates: bool = False\n expand_entity_descriptions: bool = False\n use_entity_title: bool = False\n entity_descriptions_sentence_limit: int = 5\n use_document_title: bool = False\n # Re-computes answer coordinates from the answer text.\n update_answer_coordinates: bool = False\n # Drop last rows if table doesn't fit within max sequence length.\n drop_rows_to_fit: bool = False\n # If true adds the context heading of the table to the question.\n use_context_title: bool = False\n # For TPU prediction we serialize strings into a fix length.\n trim_question_ids: bool = False\n # For each data split how to up/down sample the dataset\n label_sampling_rate: Mapping[Tuple[Text, int],\n float] = dataclasses.field(default_factory=dict)\n is_multi_hop: bool = False\n # self._is_multi_hop = config.is_multi_hop\n use_bridge_entity: bool = False\n # self._use_bridge_entity = config.use_bridge_entity\n use_question_type: bool = False\n # self._use_question_type = config.use_question_type\n\n\[email protected](frozen=True)\nclass RetrievalConversionConfig(TrimmedConversionConfig):\n use_document_title: bool = True\n use_section_title: bool = False\n use_caption: bool = False\n use_abbv: bool = False\n use_header: bool = True\n use_content: bool = True\n oracle_abbv_expansion: bool = False\n\n\[email protected](frozen=True)\nclass SerializedExample:\n tokens: List[Token]\n column_ids: List[int]\n row_ids: List[int]\n segment_ids: List[int]\n\n\ndef copy_vocab(input_vocab, output_vocab):\n \"\"\"Copies vocabulary file and add [EMPTY] token.\"\"\"\n with tf.io.gfile.GFile(input_vocab) as input_vocab_file:\n with tf.io.gfile.GFile(output_vocab, 'w') as output_vocab_file:\n for token in input_vocab_file:\n output_vocab_file.write('[EMPTY]\\n' if token ==\n '[unused0]\\n' else token)\n\n\ndef _get_pieces(tokens):\n return (token.piece for token in tokens)\n\n\ndef fingerprint(text):\n return int(hashlib.sha256(text.encode('utf-8')).hexdigest(), 16)\n\n\ndef create_int_feature(values):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n\n\ndef create_float_feature(values):\n return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n\n\ndef create_string_feature(values):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=list(values)))\n\n\ndef _is_inner_wordpiece(token):\n return token.piece.startswith('##')\n\n\ndef _get_cell_token_indexes(column_ids, row_ids,\n column_id, row_id):\n for index in range(len(column_ids)):\n if (column_ids[index] - 1 == column_id and row_ids[index] - 1 == row_id):\n yield index\n\n\ndef _get_buckets(value, buckets, name):\n for bucket_value in buckets:\n if value <= bucket_value:\n return '%s: <= %d' % (name, bucket_value)\n return '%s: < inf' % (name)\n\n\ndef _get_all_answer_ids_from_coordinates(\n column_ids,\n row_ids,\n answers_list,\n):\n \"\"\"Maps lists of answer coordinates to token indexes.\"\"\"\n answer_ids = [0] * len(column_ids)\n found_answers = set()\n all_answers = 
set()\n for answers in answers_list:\n for column_index, row_index in answers:\n all_answers.add((column_index, row_index))\n for index in _get_cell_token_indexes(column_ids, row_ids, column_index,\n row_index):\n found_answers.add((column_index, row_index))\n answer_ids[index] = 1\n\n missing_count = len(all_answers) - len(found_answers)\n buckets = [1, 2, 3, 4, 5, 10, 25, 50, 100]\n if missing_count:\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(missing_count, buckets, 'Missing answers')).inc()\n if found_answers:\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(len(found_answers), buckets, 'Found answers')).inc()\n return answer_ids, missing_count\n\n\ndef _get_all_answer_ids(\n column_ids,\n row_ids,\n questions,\n):\n \"\"\"Maps lists of questions with answer coordinates to token indexes.\"\"\"\n\n def _to_coordinates(\n question,):\n return [(coords.column_index, coords.row_index)\n for coords in question.answer.answer_coordinates]\n\n return _get_all_answer_ids_from_coordinates(\n column_ids,\n row_ids,\n answers_list=(_to_coordinates(question) for question in questions),\n )\n\n\ndef _find_tokens(text, segment):\n \"\"\"Return start index of segment in text or None.\"\"\"\n logging.info('text: %s %s', text, segment)\n for index in range(1 + len(text) - len(segment)):\n for seg_index, seg_token in enumerate(segment):\n if text[index + seg_index].piece != seg_token.piece:\n break\n else:\n return index\n return None\n\n\ndef _find_answer_coordinates_from_answer_text(\n tokenized_table,\n answer_text,\n):\n \"\"\"Returns all occurrences of answer_text in the table.\"\"\"\n logging.info('answer text: %s', answer_text)\n for row_index, row in enumerate(tokenized_table.rows):\n if row_index == 0:\n # We don't search for answers in the header.\n continue\n for col_index, cell in enumerate(row):\n token_index = _find_tokens(cell, answer_text)\n if token_index is not None:\n yield TokenCoordinates(\n row_index=row_index,\n column_index=col_index,\n token_index=token_index,\n )\n\n\ndef _find_answer_ids_from_answer_texts(\n column_ids,\n row_ids,\n tokenized_table,\n answer_texts,\n):\n \"\"\"Maps question with answer texts to the first matching token indexes.\"\"\"\n answer_ids = [0] * len(column_ids)\n for answer_text in answer_texts:\n found_answer_text = False\n found_answer_text_ids = False\n for coordinates in _find_answer_coordinates_from_answer_text(\n tokenized_table,\n answer_text,\n ):\n found_answer_text = True\n # Maps answer coordinates to indexes this can fail if tokens / rows have\n # been pruned.\n indexes = list(\n _get_cell_token_indexes(\n column_ids,\n row_ids,\n column_id=coordinates.column_index,\n row_id=coordinates.row_index - 1,\n ))\n indexes.sort()\n coordinate_answer_ids = []\n if indexes:\n begin_index = coordinates.token_index + indexes[0]\n end_index = begin_index + len(answer_text)\n for index in indexes:\n if index >= begin_index and index < end_index:\n coordinate_answer_ids.append(index)\n if len(coordinate_answer_ids) == len(answer_text):\n found_answer_text_ids = True\n for index in coordinate_answer_ids:\n answer_ids[index] = 1\n break\n beam_metrics.Metrics.counter(_NS, 'Answer texts: total').inc()\n if found_answer_text:\n beam_metrics.Metrics.counter(_NS, 'Answer texts: found').inc()\n if found_answer_text_ids:\n beam_metrics.Metrics.counter(_NS, 'Answer texts: found ids').inc()\n\n return answer_ids\n\n\ndef _get_answer_ids(column_ids, row_ids,\n question):\n \"\"\"Maps answer coordinates to token indexes.\"\"\"\n answer_ids, missing_count 
= _get_all_answer_ids(column_ids, row_ids,\n [question])\n\n if missing_count:\n raise ValueError(\"Couldn't find all answers\")\n return answer_ids\n\n\ndef _get_annotation_name(identifier):\n \"\"\"Extracts the clean title from a Wikipedia identifier.\"\"\"\n # Example input: /wiki/New_York_City -> New York City\n return identifier.split('/')[-1].replace('_', ' ')\n\n\ndef _add_entity_descriptions_to_table(\n question,\n descriptions,\n table,\n use_entity_title,\n num_results,\n):\n \"\"\"Expand table cells with the descriptions of the entities mentioned.\n\n This function will add entity descriptions inside the Table proto by expanding\n the content of each cell according to entities mentioned in that cell. The\n sentences in the descriptions will be ranked by similarity to the question and\n only the top results will be included.\n\n Args:\n question: Question proto containing the question text. The text will be used\n to filter only a subset of the descriptions using a similarity criteria.\n descriptions: A map that contains for entity id, its textual description.\n table: Table to be modified in-place. Some cells may contain annotation\n extensions with entity ids that will be expanded with their descriptions.\n use_entity_title: Prepend the entity title to entity descriptions.\n num_results: Limit on the number of entities to expand with a description.\n \"\"\"\n descriptions = {\n key: sentence_tokenizer.tokenize(description)\n for key, description in descriptions.items()\n }\n documents = []\n for sentences in descriptions.values():\n for sentence in sentences:\n documents.append(sentence)\n\n search_results = text_index.TextIndex(documents).search(\n question.text, num_results=num_results)\n logging.log_first_n(logging.INFO,\n '%s selected entity annotations for %s: %s', 100,\n question.id, question.text, search_results)\n search_results_set = {r.text for r in search_results}\n\n buckets = [1, 2, 3, 4, 5, 10, 25, 50, 100]\n sentences_kept = len(search_results_set)\n sentences_discarded = len(documents) - sentences_kept\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(sentences_kept, buckets, 'Descriptions kept')).inc()\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(sentences_discarded, buckets,\n 'Descriptions discarded')).inc()\n\n annotated_cell_ext = annotated_text_pb2.AnnotatedText.annotated_cell_ext\n for row in table.rows:\n for cell in row.cells:\n if annotated_cell_ext in cell.Extensions:\n for annotation in cell.Extensions[annotated_cell_ext].annotations:\n sentences = descriptions[annotation.identifier]\n filtered_sentences = ' '.join(\n sent for sent in sentences if sent in search_results_set)\n if filtered_sentences:\n if use_entity_title:\n annotation_name = _get_annotation_name(annotation.identifier)\n cell.text += f' ( {annotation_name} : {filtered_sentences} )'\n else:\n cell.text += f' ( {filtered_sentences} )'\n\n\nclass TapasTokenizer:\n \"\"\"Wraps a Bert tokenizer.\"\"\"\n\n def __init__(self, vocab_file):\n self._basic_tokenizer = tokenization.BasicTokenizer(do_lower_case=True)\n self._wp_tokenizer = tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=True)\n\n def get_vocab(self):\n return self._wp_tokenizer.vocab.keys()\n\n def tokenize(self, text):\n if text_utils.format_text(text) == constants.EMPTY_TEXT:\n return [Token(_EMPTY, _EMPTY)]\n tokens = []\n for token in self._basic_tokenizer.tokenize(text):\n for piece in self._wp_tokenizer.tokenize(token):\n tokens.append(Token(token, piece))\n return tokens\n\n def 
convert_tokens_to_ids(self, word_pieces):\n return self._wp_tokenizer.convert_tokens_to_ids(word_pieces)\n\n def question_encoding_cost(self, question_tokens):\n # Two extra spots of SEP and CLS.\n return len(question_tokens) + 2\n\n\nclass ToTensorflowExampleBase:\n \"\"\"Base class for converting interactions to TF examples.\"\"\"\n\n def __init__(self, config):\n self._max_seq_length = config.max_seq_length\n self._max_column_id = config.max_column_id\n self._max_row_id = config.max_row_id\n self._strip_column_names = config.strip_column_names\n self._tokenizer = TapasTokenizer(config.vocab_file)\n\n def _tokenize_table(\n self,\n table,\n ):\n \"\"\"Runs tokenizer over columns and table cell texts.\"\"\"\n tokenized_rows = []\n tokenized_row = []\n for column in table.columns:\n if self._strip_column_names:\n tokenized_row.append(self._tokenizer.tokenize(''))\n else:\n tokenized_row.append(self._tokenizer.tokenize(column.text))\n tokenized_rows.append(tokenized_row)\n\n for row in table.rows:\n tokenized_row = []\n for cell in row.cells:\n tokenized_row.append(self._tokenizer.tokenize(cell.text))\n tokenized_rows.append(tokenized_row)\n\n token_coordinates = []\n for row_index, row in enumerate(tokenized_rows):\n for column_index, cell in enumerate(row):\n for token_index, _ in enumerate(cell):\n token_coordinates.append(\n TokenCoordinates(\n row_index=row_index,\n column_index=column_index,\n token_index=token_index,\n ))\n\n return TokenizedTable(\n rows=tokenized_rows,\n selected_tokens=token_coordinates,\n )\n\n def _get_table_values(self, table, num_columns,\n num_rows,\n num_tokens):\n \"\"\"Iterates over partial table and returns token, col. and row indexes.\"\"\"\n for tc in table.selected_tokens:\n # First row is header row.\n if tc.row_index >= num_rows + 1:\n continue\n if tc.column_index >= num_columns:\n continue\n cell = table.rows[tc.row_index][tc.column_index]\n token = cell[tc.token_index]\n word_begin_index = tc.token_index\n # Don't add partial words. 
Find the starting word piece and check if it\n # fits in the token budget.\n while (word_begin_index >= 0 and\n _is_inner_wordpiece(cell[word_begin_index])):\n word_begin_index -= 1\n if word_begin_index >= num_tokens:\n continue\n yield token, tc.column_index + 1, tc.row_index\n\n def _serialize_text(\n self, question_tokens\n ):\n \"\"\"Serialzes texts in index arrays.\"\"\"\n tokens = []\n segment_ids = []\n column_ids = []\n row_ids = []\n\n tokens.append(Token(_CLS, _CLS))\n segment_ids.append(0)\n column_ids.append(0)\n row_ids.append(0)\n\n for token in question_tokens:\n tokens.append(token)\n segment_ids.append(0)\n column_ids.append(0)\n row_ids.append(0)\n\n return tokens, segment_ids, column_ids, row_ids\n\n def _serialize(\n self,\n question_tokens,\n table,\n num_columns,\n num_rows,\n num_tokens,\n ):\n \"\"\"Serializes table and text.\"\"\"\n tokens, segment_ids, column_ids, row_ids = self._serialize_text(\n question_tokens)\n\n tokens.append(Token(_SEP, _SEP))\n segment_ids.append(0)\n column_ids.append(0)\n row_ids.append(0)\n\n for token, column_id, row_id in self._get_table_values(\n table, num_columns, num_rows, num_tokens):\n tokens.append(token)\n segment_ids.append(1)\n column_ids.append(column_id)\n row_ids.append(row_id)\n\n return SerializedExample(\n tokens=tokens,\n segment_ids=segment_ids,\n column_ids=column_ids,\n row_ids=row_ids,\n )\n\n def _tokenize(self, text):\n return self._tokenizer.tokenize(text)\n\n def _get_token_budget(self, question_tokens):\n return self._max_seq_length - self._tokenizer.question_encoding_cost(\n question_tokens)\n\n def _get_table_boundaries(self,\n table):\n \"\"\"Return maximal number of rows, columns and tokens.\"\"\"\n max_num_tokens = 0\n max_num_columns = 0\n max_num_rows = 0\n for tc in table.selected_tokens:\n max_num_columns = max(max_num_columns, tc.column_index + 1)\n max_num_rows = max(max_num_rows, tc.row_index + 1)\n max_num_tokens = max(max_num_tokens, tc.token_index + 1)\n max_num_columns = min(self._max_column_id, max_num_columns)\n max_num_rows = min(self._max_row_id, max_num_rows)\n return max_num_rows, max_num_columns, max_num_tokens\n\n def _get_table_cost(self, table, num_columns,\n num_rows, num_tokens):\n return sum(1 for _ in self._get_table_values(table, num_columns, num_rows,\n num_tokens))\n\n def _get_column_values(\n self, table,\n col_index):\n table_numeric_values = {}\n for row_index, row in enumerate(table.rows):\n cell = row.cells[col_index]\n if cell.HasField('numeric_value'):\n table_numeric_values[row_index] = cell.numeric_value\n return table_numeric_values\n\n def _add_numeric_column_ranks(self, column_ids, row_ids,\n table,\n features):\n \"\"\"Adds column ranks for all numeric columns.\"\"\"\n\n ranks = [0] * len(column_ids)\n inv_ranks = [0] * len(column_ids)\n\n if table:\n for col_index in range(len(table.columns)):\n table_numeric_values = self._get_column_values(table, col_index)\n if not table_numeric_values:\n continue\n\n try:\n key_fn = number_annotation_utils.get_numeric_sort_key_fn(\n table_numeric_values.values())\n except ValueError:\n continue\n\n table_numeric_values = {\n row_index: key_fn(value)\n for row_index, value in table_numeric_values.items()\n }\n\n table_numeric_values_inv = collections.defaultdict(list)\n for row_index, value in table_numeric_values.items():\n table_numeric_values_inv[value].append(row_index)\n\n unique_values = sorted(table_numeric_values_inv.keys())\n\n for rank, value in enumerate(unique_values):\n for row_index in 
table_numeric_values_inv[value]:\n for index in _get_cell_token_indexes(column_ids, row_ids, col_index,\n row_index):\n ranks[index] = rank + 1\n inv_ranks[index] = len(unique_values) - rank\n\n features['column_ranks'] = create_int_feature(ranks)\n features['inv_column_ranks'] = create_int_feature(inv_ranks)\n\n def _get_numeric_sort_key_fn(self, table_numeric_values, value):\n \"\"\"Returns the sort key function for comparing value to table values.\n\n The function returned will be a suitable input for the key param of the\n sort(). See number_annotation_utils._get_numeric_sort_key_fn for details.\n\n Args:\n table_numeric_values: Numeric values of a column\n value: Numeric value in the question.\n\n Returns:\n A function key function to compare column and question values.\n\n \"\"\"\n if not table_numeric_values:\n return None\n all_values = list(table_numeric_values.values())\n all_values.append(value)\n try:\n return number_annotation_utils.get_numeric_sort_key_fn(all_values)\n except ValueError:\n return None\n\n def _add_numeric_relations(self, question,\n column_ids, row_ids,\n table,\n features):\n \"\"\"Adds numeric relation emebeddings to 'features'.\n\n Args:\n question: The question, numeric values are used.\n column_ids: Maps word piece position to column id.\n row_ids: Maps word piece position to row id.\n table: The table containing the numeric cell values.\n features: Output.\n \"\"\"\n\n numeric_relations = [0 for _ in column_ids]\n\n # Create a dictionary that maps a table cell to the set of all relations\n # this cell has with any value in the question.\n cell_indices_to_relations = collections.defaultdict(set)\n if question is not None and table is not None:\n for numeric_value_span in question.annotations.spans:\n for value in numeric_value_span.values:\n for column_index in range(len(table.columns)):\n table_numeric_values = self._get_column_values(table, column_index)\n sort_key_fn = self._get_numeric_sort_key_fn(table_numeric_values,\n value)\n if sort_key_fn is None:\n continue\n for row_index, cell_value in table_numeric_values.items():\n relation = number_annotation_utils.get_numeric_relation(\n value, cell_value, sort_key_fn)\n if relation is not None:\n cell_indices_to_relations[column_index, row_index].add(relation)\n\n # For each cell add a special feature for all its word pieces.\n for (column_index,\n row_index), relations in cell_indices_to_relations.items():\n relation_set_index = 0\n for relation in relations:\n assert relation.value >= constants.Relation.EQ.value\n relation_set_index += 2**(relation.value - constants.Relation.EQ.value)\n beam_metrics.Metrics.counter(\n _NS, 'Relation Set Index: %d' % relation_set_index).inc()\n for cell_token_index in _get_cell_token_indexes(column_ids, row_ids,\n column_index, row_index):\n numeric_relations[cell_token_index] = relation_set_index\n\n features['numeric_relations'] = create_int_feature(numeric_relations)\n\n def _add_numeric_values(self, table,\n token_ids_dict,\n features):\n \"\"\"Adds numeric values for computation of answer loss.\"\"\"\n numeric_values = [_NAN] * self._max_seq_length\n if table:\n for col_index in range(len(table.columns)):\n for row_index in range(len(table.rows)):\n\n numeric_value = table.rows[row_index].cells[col_index].numeric_value\n if not numeric_value.HasField('float_value'):\n continue\n\n float_value = numeric_value.float_value\n if float_value == float('inf'):\n beam_metrics.Metrics.counter(\n _NS, 'cell with numeric value of infinite').inc()\n continue\n\n for index in 
_get_cell_token_indexes(token_ids_dict['column_ids'],\n token_ids_dict['row_ids'],\n col_index, row_index):\n numeric_values[index] = float_value\n features['numeric_values'] = create_float_feature(numeric_values)\n\n def _add_numeric_values_scale(self, table, token_ids_dict, features):\n \"\"\"Adds a scale to each token to down weigh the value of long words.\"\"\"\n numeric_values_scale = [1.0] * self._max_seq_length\n if not table:\n return numeric_values_scale\n for col_index in range(len(table.columns)):\n for row_index in range(len(table.rows)):\n indices = [\n index for index in _get_cell_token_indexes(\n token_ids_dict['column_ids'], token_ids_dict['row_ids'],\n col_index, row_index)\n ]\n num_indices = len(indices)\n if num_indices > 1:\n for index in indices:\n numeric_values_scale[index] = float(num_indices)\n features['numeric_values_scale'] = create_float_feature(\n numeric_values_scale)\n\n def _pad_to_seq_length(self, inputs):\n while len(inputs) > self._max_seq_length:\n inputs.pop()\n while len(inputs) < self._max_seq_length:\n inputs.append(0)\n\n def _to_token_ids(self, tokens):\n return self._tokenizer.convert_tokens_to_ids(_get_pieces(tokens))\n\n def _to_features(\n self, tokens, token_ids_dict,\n table,\n question):\n \"\"\"Produces a dict of TF features.\"\"\"\n tokens = list(tokens)\n token_ids_dict = {\n key: list(values) for key, values in token_ids_dict.items()\n }\n\n length = len(tokens)\n for values in token_ids_dict.values():\n if len(values) != length:\n raise ValueError('Inconsistent length')\n\n input_ids = self._to_token_ids(tokens)\n input_mask = [1] * len(input_ids)\n\n self._pad_to_seq_length(input_ids)\n self._pad_to_seq_length(input_mask)\n for values in token_ids_dict.values():\n self._pad_to_seq_length(values)\n\n assert len(input_ids) == self._max_seq_length\n assert len(input_mask) == self._max_seq_length\n for values in token_ids_dict.values():\n assert len(values) == self._max_seq_length\n\n features = collections.OrderedDict()\n features['input_ids'] = create_int_feature(input_ids)\n features['input_mask'] = create_int_feature(input_mask)\n for key, values in sorted(token_ids_dict.items()):\n features[key] = create_int_feature(values)\n\n self._add_numeric_column_ranks(token_ids_dict['column_ids'],\n token_ids_dict['row_ids'], table, features)\n\n self._add_numeric_relations(question, token_ids_dict['column_ids'],\n token_ids_dict['row_ids'], table, features)\n\n self._add_numeric_values(table, token_ids_dict, features)\n\n self._add_numeric_values_scale(table, token_ids_dict, features)\n\n if table:\n features['table_id'] = create_string_feature(\n [table.table_id.encode('utf8')])\n features['table_id_hash'] = create_int_feature(\n [fingerprint(table.table_id) % _MAX_INT])\n return features\n\n\nclass ToPretrainingTensorflowExample(ToTensorflowExampleBase):\n \"\"\"Class for converting pretraining examples.\"\"\"\n\n def __init__(self, config):\n super(ToPretrainingTensorflowExample, self).__init__(config)\n self._max_predictions_per_seq = config.max_predictions_per_seq\n self._masked_lm_prob = config.masked_lm_prob\n self._min_question_length = config.min_question_length\n self._max_question_length = config.max_question_length\n self._concatenate_snippets = config.concatenate_snippets\n self._always_continue_cells = config.always_continue_cells\n self._question_buckets = [\n self._min_question_length,\n (self._min_question_length + self._max_question_length) / 2,\n self._max_question_length\n ]\n self._vocab_words = 
list(self._tokenizer.get_vocab())\n\n def _to_example(self, table,\n instance):\n \"\"\"Creates TF example from TrainingInstance.\"\"\"\n\n features = self._to_features(\n instance.tokens, {\n 'column_ids': instance.column_ids,\n 'prev_label_ids': [0] * len(instance.tokens),\n 'row_ids': instance.row_ids,\n 'segment_ids': instance.segment_ids,\n },\n table=table,\n question=None)\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = self._tokenizer.convert_tokens_to_ids(\n instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n while len(masked_lm_positions) < self._max_predictions_per_seq:\n masked_lm_positions.append(0)\n masked_lm_ids.append(0)\n masked_lm_weights.append(0.0)\n\n is_random_table = 1 if instance.is_random_table else 0\n\n features['masked_lm_positions'] = create_int_feature(masked_lm_positions)\n features['masked_lm_ids'] = create_int_feature(masked_lm_ids)\n features['masked_lm_weights'] = create_float_feature(masked_lm_weights)\n features['next_sentence_labels'] = create_int_feature([is_random_table])\n features['is_random_table'] = create_int_feature([is_random_table])\n\n return tf.train.Example(features=tf.train.Features(feature=features))\n\n def convert(\n self,\n rng,\n interaction,\n random_table,\n ):\n \"\"\"Creates TF example from interaction.\"\"\"\n question_tokens = self._get_question_tokens(interaction, rng)\n\n if random_table is not None and rng.random() < 0.5:\n is_random_table = True\n table = random_table\n else:\n is_random_table = False\n if interaction.HasField('table'):\n table = interaction.table\n else:\n table = None\n\n if table is None:\n beam_metrics.Metrics.counter(_NS, 'Examples without tables').inc()\n question_tokens = self._tokenizer.tokenize(\n interaction.questions[0].original_text)\n question_tokens = question_tokens[:self._max_seq_length - 1]\n tokens, segment_ids, column_ids, row_ids = self._serialize_text(\n question_tokens)\n else:\n if (not question_tokens or\n len(question_tokens) < self._min_question_length):\n beam_metrics.Metrics.counter(\n _NS,\n f'Remove question below the min length {self._min_question_length}'\n ).inc()\n return None\n beam_metrics.Metrics.counter(_NS, 'Examples with tables').inc()\n beam_metrics.Metrics.counter(\n _NS,\n _get_buckets(\n len(question_tokens), self._question_buckets,\n 'Question Length')).inc()\n if random_table is not None:\n logging.log_every_n(logging.INFO,\n 'Table: %s Random Table: %s is_random_table: %s',\n 500000, interaction.table.table_id,\n random_table.table_id, is_random_table)\n\n token_budget = self._get_token_budget(question_tokens)\n tokenized_table = self._tokenize_table(table)\n try:\n num_columns, num_rows, num_tokens = self._get_table_sizes(\n token_budget, tokenized_table, rng)\n except ValueError:\n return None\n\n serialized_example = self._serialize(question_tokens, tokenized_table,\n num_columns, num_rows, num_tokens)\n tokens = serialized_example.tokens\n segment_ids = serialized_example.segment_ids\n row_ids = serialized_example.row_ids\n column_ids = serialized_example.column_ids\n\n assert len(tokens) <= self._max_seq_length\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = self._create_masked_lm_predictions(\n interaction, tokens, column_ids, row_ids, rng)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n column_ids=column_ids,\n row_ids=row_ids,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels,\n is_random_table=is_random_table)\n return 
self._to_example(table, instance)\n\n def _create_masked_lm_predictions(\n self, interaction, tokens,\n column_ids, row_ids,\n rng):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token.piece in [_CLS, _SEP]:\n continue\n column_id = column_ids[i]\n is_cell_continutation = column_id > 0 and column_id == column_ids[i - 1]\n if not self._always_continue_cells:\n is_cell_continutation = False\n if cand_indexes and (_is_inner_wordpiece(token) or is_cell_continutation):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n rng.shuffle(cand_indexes)\n\n output_tokens = list(tokens)\n\n num_to_predict = min(self._max_predictions_per_seq,\n max(1, int(round(len(tokens) * self._masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n\n for index in index_set:\n assert index not in covered_indexes\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = _MASK\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index].piece\n # 10% of the time, replace with random word\n else:\n masked_token = rng.choice(self._vocab_words)\n\n output_tokens[index] = Token(tokens[index].original_text, masked_token)\n\n masked_lms.append(\n MaskedLmInstance(index=index, label=tokens[index].piece))\n assert len(masked_lms) <= num_to_predict\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)\n\n def _get_question_tokens(self, interaction,\n rng):\n \"\"\"Randomly gets a snippet of relevant text.\"\"\"\n questions = [q.text for q in interaction.questions]\n rng.shuffle(questions)\n if not self._concatenate_snippets:\n # Find the first snippet that satisfies the requirements.\n for question in questions:\n tokens = self._tokenizer.tokenize(question)\n if len(tokens) > self._max_question_length:\n continue\n if len(tokens) < self._min_question_length:\n continue\n return tokens\n return None\n tokens = []\n for question in questions:\n tokens += self._tokenizer.tokenize(question)\n\n if len(tokens) < self._min_question_length:\n return None\n\n max_start_index = len(tokens) - self._min_question_length\n start_index = rng.randint(0, max_start_index)\n while start_index >= 0 and _is_inner_wordpiece(tokens[start_index]):\n start_index -= 1\n\n min_end_index = start_index + self._min_question_length\n max_end_index = min(len(tokens), self._max_question_length + start_index)\n assert min_end_index <= max_end_index\n end_index = rng.randint(min_end_index, max_end_index)\n assert (self._min_question_length <= end_index - start_index <=\n self._max_question_length)\n while end_index < len(tokens) and _is_inner_wordpiece(tokens[end_index]):\n end_index += 1\n\n return tokens[start_index:end_index]\n\n def _get_table_sizes(self, token_budget, table,\n rng):\n \"\"\"Computes column, row and token count for table.\"\"\"\n num_columns = 1\n num_rows = 1\n num_tokens = 1\n table_cost = self._get_table_cost(table, num_columns, num_rows, 
num_tokens)\n if table_cost > token_budget:\n raise ValueError('Cannot create table that fits budget')\n\n max_num_rows, max_num_columns, max_num_tokens = self._get_table_boundaries(\n table)\n\n while (num_columns < max_num_columns or num_rows < max_num_rows or\n num_tokens < max_num_tokens):\n if num_columns < max_num_columns and rng.random() < 0.5:\n cost = self._get_table_cost(table, num_columns + 1, num_rows,\n num_tokens)\n if cost > token_budget:\n break\n num_columns += 1\n if num_rows < max_num_rows and rng.random() < 0.5:\n cost = self._get_table_cost(table, num_columns, num_rows + 1,\n num_tokens)\n if cost > token_budget:\n break\n num_rows += 1\n if num_tokens < max_num_tokens and rng.random() < 0.5:\n cost = self._get_table_cost(table, num_columns, num_rows,\n num_tokens + 1)\n if cost > token_budget:\n break\n num_tokens += 1\n\n buckets = [8, 16, 32, 64, 128, 256]\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(num_columns * num_rows, buckets,\n 'Trimmed Table Size')).inc()\n\n # First row is the header.\n real_num_columns = len(table.rows[0]) if table.rows else 0\n # We don't count the header row.\n real_num_rows = len(table.rows) - 1\n\n beam_metrics.Metrics.counter(\n _NS,\n _get_buckets(real_num_columns * real_num_rows, buckets,\n 'Real Table Size')).inc()\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(num_columns, buckets, 'Column Sizes')).inc()\n beam_metrics.Metrics.counter(_NS,\n _get_buckets(num_rows, buckets,\n 'Row Sizes')).inc()\n beam_metrics.Metrics.counter(\n _NS, _get_buckets(num_tokens, buckets, 'Table Token Sizes')).inc()\n\n return num_columns, num_rows, num_tokens\n\n\nclass ToTrimmedTensorflowExample(ToTensorflowExampleBase):\n \"\"\"Helper that allows squeezing a table into the max seq length.\"\"\"\n\n def __init__(self, config):\n super(ToTrimmedTensorflowExample, self).__init__(config)\n self._cell_trim_length = config.cell_trim_length\n\n def _get_num_columns(self, table):\n num_columns = len(table.columns)\n if num_columns >= self._max_column_id:\n raise ValueError('Too many columns')\n return num_columns\n\n def _get_num_rows(self, table,\n drop_rows_to_fit):\n num_rows = len(table.rows)\n if num_rows >= self._max_row_id:\n if drop_rows_to_fit:\n num_rows = self._max_row_id - 1\n else:\n raise ValueError('Too many rows')\n return num_rows\n\n def _to_trimmed_features(\n self,\n question,\n table,\n question_tokens,\n tokenized_table,\n num_columns,\n num_rows,\n drop_rows_to_fit = False,\n ):\n \"\"\"Finds optiomal number of table tokens to include and serializes.\"\"\"\n init_num_rows = num_rows\n while True:\n num_tokens = self._get_max_num_tokens(\n question_tokens,\n tokenized_table,\n num_rows=num_rows,\n num_columns=num_columns,\n )\n if num_tokens is not None:\n # We could fit the table.\n break\n if not drop_rows_to_fit or num_rows == 0:\n raise ValueError('Sequence too long')\n # Try to drop a row to fit the table.\n num_rows -= 1\n if init_num_rows != num_rows:\n beam_metrics.Metrics.counter(_NS, 'Tables with trimmed rows').inc()\n serialized_example = self._serialize(question_tokens, tokenized_table,\n num_columns, num_rows, num_tokens)\n\n assert len(serialized_example.tokens) <= self._max_seq_length\n\n feature_dict = {\n 'column_ids': serialized_example.column_ids,\n 'row_ids': serialized_example.row_ids,\n 'segment_ids': serialized_example.segment_ids,\n }\n features = self._to_features(\n serialized_example.tokens, feature_dict, table=table, question=question)\n return serialized_example, features\n\n def 
_get_max_num_tokens(\n self,\n question_tokens,\n tokenized_table,\n num_columns,\n num_rows,\n ):\n \"\"\"Computes max number of tokens that can be squeezed into the budget.\"\"\"\n token_budget = self._get_token_budget(question_tokens)\n _, _, max_num_tokens = self._get_table_boundaries(tokenized_table)\n if self._cell_trim_length >= 0 and max_num_tokens > self._cell_trim_length:\n max_num_tokens = self._cell_trim_length\n num_tokens = 0\n for num_tokens in range(max_num_tokens + 1):\n cost = self._get_table_cost(tokenized_table, num_columns, num_rows,\n num_tokens + 1)\n if cost > token_budget:\n break\n if num_tokens < max_num_tokens:\n if self._cell_trim_length >= 0:\n # We don't allow dynamic trimming if a cell_trim_length is set.\n return None\n if num_tokens == 0:\n return None\n beam_metrics.Metrics.counter(_NS, 'Tables with trimmed cells').inc()\n return num_tokens\n\n\nclass ToClassifierTensorflowExample(ToTrimmedTensorflowExample):\n \"\"\"Class for converting finetuning examples.\"\"\"\n\n def __init__(self, config):\n super(ToClassifierTensorflowExample, self).__init__(config)\n self._add_aggregation_candidates = config.add_aggregation_candidates\n self._use_document_title = config.use_document_title\n self._use_context_title = config.use_context_title\n self._update_answer_coordinates = config.update_answer_coordinates\n self._drop_rows_to_fit = config.drop_rows_to_fit\n self._trim_question_ids = config.trim_question_ids\n self._expand_entity_descriptions = config.expand_entity_descriptions\n self._use_entity_title = config.use_entity_title\n self._entity_descriptions_sentence_limit = config.entity_descriptions_sentence_limit\n \n # TODO (Chia-Chun)\n self._is_multi_hop = config.is_multi_hop\n self._use_bridge_entity = config.use_bridge_entity\n self._use_question_type = config.use_question_type\n\n def _tokenize_extended_question(\n self,\n question,\n table,\n ):\n \"\"\"Runs tokenizer over the question text and document title if it's used.\"\"\"\n # (Chia-Chun): \n text_tokens = []\n if self._use_question_type:\n question_type_tokens = self._tokenizer.tokenize(question.question_type)\n text_tokens.extend(question_type_tokens)\n text_tokens.append(Token(_SEP, _SEP))\n if self._is_multi_hop:\n hop_tokens = self._tokenizer.tokenize(\"Hop is \" + str(question.hop))\n text_tokens.extend(hop_tokens)\n text_tokens.append(Token(_SEP, _SEP))\n\n if self._use_bridge_entity:\n for bridge_entity in question.bridge_entities:\n bridge_entity_tokens = self._tokenizer.tokenize(bridge_entity)\n text_tokens.extend(bridge_entity_tokens)\n text_tokens.append(Token(_SEP, _SEP))\n\n \n\n\n question_tokens = self._tokenizer.tokenize(question.text)\n text_tokens.extend(list(question_tokens))\n #import pdb;pdb.set_trace()\n if self._use_document_title and table.document_title:\n # TODO(thomasmueller) Consider adding a different segment id.\n document_title_tokens = self._tokenizer.tokenize(table.document_title)\n text_tokens.append(Token(_SEP, _SEP))\n text_tokens.extend(document_title_tokens)\n context_heading = table.context_heading\n if self._use_context_title and context_heading:\n context_title_tokens = self._tokenizer.tokenize(context_heading)\n text_tokens.append(Token(_SEP, _SEP))\n text_tokens.extend(context_title_tokens)\n \n\n\n\n return text_tokens\n\n def convert(self, interaction,\n index):\n \"\"\"Converts question at 'index' to example.\"\"\"\n table = interaction.table\n\n num_rows = self._get_num_rows(table, self._drop_rows_to_fit)\n num_columns = 
self._get_num_columns(table)\n\n question = interaction.questions[index]\n #debug, disable here\n if not interaction.questions[index].answer.is_valid:\n beam_metrics.Metrics.counter(\n _NS, 'Conversion skipped (answer not valid)').inc()\n raise ValueError('Invalid answer')\n\n annotation_descriptions_ext = (\n annotated_text_pb2.AnnotationDescription.annotation_descriptions_ext)\n if (self._expand_entity_descriptions and\n annotation_descriptions_ext in interaction.Extensions):\n descriptions = interaction.Extensions[\n annotation_descriptions_ext].descriptions\n _add_entity_descriptions_to_table(\n question,\n descriptions,\n table,\n use_entity_title=self._use_entity_title,\n num_results=self._entity_descriptions_sentence_limit)\n\n text_tokens = self._tokenize_extended_question(question, table)\n tokenized_table = self._tokenize_table(table)\n table_selection_ext = table_selection_pb2.TableSelection.table_selection_ext\n if table_selection_ext in question.Extensions:\n table_selection = question.Extensions[table_selection_ext]\n if not tokenized_table.selected_tokens:\n raise ValueError('No tokens selected')\n if table_selection.selected_tokens:\n selected_tokens = {(t.row_index, t.column_index, t.token_index)\n for t in table_selection.selected_tokens}\n tokenized_table.selected_tokens = [\n t for t in tokenized_table.selected_tokens\n if (t.row_index, t.column_index, t.token_index) in selected_tokens\n ]\n\n serialized_example, features = self._to_trimmed_features(\n question=question,\n table=table,\n question_tokens=text_tokens,\n tokenized_table=tokenized_table,\n num_columns=num_columns,\n num_rows=num_rows,\n drop_rows_to_fit=self._drop_rows_to_fit)\n\n column_ids = serialized_example.column_ids\n row_ids = serialized_example.row_ids\n\n def get_answer_ids(question):\n if self._update_answer_coordinates:\n return _find_answer_ids_from_answer_texts(\n column_ids,\n row_ids,\n tokenized_table,\n answer_texts=[\n self._tokenizer.tokenize(at)\n for at in question.answer.answer_texts\n ],\n )\n return _get_answer_ids(column_ids, row_ids, question)\n\n answer_ids = get_answer_ids(question)\n self._pad_to_seq_length(answer_ids)\n features['label_ids'] = create_int_feature(answer_ids)\n\n if index > 0:\n prev_answer_ids = get_answer_ids(interaction.questions[index - 1],)\n else:\n prev_answer_ids = [0] * len(column_ids)\n self._pad_to_seq_length(prev_answer_ids)\n features['prev_label_ids'] = create_int_feature(prev_answer_ids)\n features['question_id'] = create_string_feature(\n [question.id.encode('utf8')])\n if self._trim_question_ids:\n question_id = question.id[-text_utils.DEFAULT_INTS_LENGTH:]\n else:\n question_id = question.id\n features['question_id_ints'] = create_int_feature(\n text_utils.str_to_ints(\n question_id, length=text_utils.DEFAULT_INTS_LENGTH))\n features['aggregation_function_id'] = create_int_feature(\n [question.answer.aggregation_function])\n features['classification_class_index'] = create_int_feature(\n [question.answer.class_index])\n\n answer = question.answer.float_value if question.answer.HasField(\n 'float_value') else _NAN\n features['answer'] = create_float_feature([answer])\n\n if self._add_aggregation_candidates:\n rng = random.Random(fingerprint(question.id))\n\n candidates = interpretation_utils.find_candidates(rng, table, question)\n num_initial_candidates = len(candidates)\n\n candidates = [c for c in candidates if len(c.rows) < _MAX_NUM_ROWS]\n candidates = candidates[:_MAX_NUM_CANDIDATES]\n\n funs = [0] * _MAX_NUM_CANDIDATES\n sizes = [0] * 
_MAX_NUM_CANDIDATES\n indexes = []\n\n num_final_candidates = 0\n for index, candidate in enumerate(candidates):\n token_indexes = []\n for row in candidate.rows:\n token_indexes += _get_cell_token_indexes(column_ids, row_ids,\n candidate.column, row)\n if len(indexes) + len(serialized_example.tokens) > _MAX_INDEX_LENGTH:\n break\n num_final_candidates += 1\n sizes[index] = len(token_indexes)\n funs[index] = candidate.agg_function\n indexes += token_indexes\n\n # <int>[1]\n features['cand_num'] = create_int_feature([num_final_candidates])\n # <int>[_MAX_NUM_CANDIDATES]\n features['can_aggregation_function_ids'] = create_int_feature(funs)\n # <int>[_MAX_NUM_CANDIDATES]\n features['can_sizes'] = create_int_feature(sizes)\n # <int>[_MAX_INDEX_LENGTH]\n # Actual length is sum(sizes).\n features['can_indexes'] = create_int_feature(indexes)\n\n if num_initial_candidates > 0:\n beam_metrics.Metrics.counter(\n _NS,\n _get_buckets(num_initial_candidates,\n [10, 20, 50, 100, 200, 500, 1000, 1200, 1500],\n 'Candidates Size:')).inc()\n\n beam_metrics.Metrics.counter(_NS, 'Candidates: Input').inc()\n if num_final_candidates != num_initial_candidates:\n beam_metrics.Metrics.counter(_NS,\n 'Candidates: Dropped candidates').inc()\n\n return tf.train.Example(features=tf.train.Features(feature=features))\n\n def get_empty_example(self):\n interaction = interaction_pb2.Interaction(questions=[\n interaction_pb2.Question(id=text_utils.get_padded_question_id())\n ])\n return self.convert(interaction, index=0)\n"
] | [
[
"tensorflow.compat.v1.train.Features",
"tensorflow.compat.v1.io.gfile.GFile"
]
] |
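Note on the tapas/utils/tf_example_utils.py row above: whichever conversion path is taken (pretraining or classifier), every interaction is ultimately serialized as a tf.train.Example whose fields are built by the small create_int_feature / create_float_feature / create_string_feature helpers. A minimal sketch of that final assembly step, with toy values standing in for real word-piece ids (assumes a TF 1.x-compatible install, matching the file's tensorflow.compat.v1 import):

import collections
import tensorflow.compat.v1 as tf

def create_int_feature(values):
    # Same shape as the helper in the file: wrap ints in an Int64List feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

def create_float_feature(values):
    return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))

features = collections.OrderedDict()
features['input_ids'] = create_int_feature([2, 7, 14, 3])  # toy word-piece ids
features['numeric_values'] = create_float_feature([float('nan'), 3.5])  # NaN marks a non-numeric cell
example = tf.train.Example(features=tf.train.Features(feature=features))
print(len(example.SerializeToString()))  # size of the serialized proto in bytes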
gdepalma93/bright-athlete-academy | [
"54ba0cc6633637c1bd6d90120153e04b981244bf"
] | [
"Resources/books/long_short_term_memory_networks_with_python/code/lesson_12/tune_batch_size.py"
] | [
"from keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom matplotlib import pyplot\nfrom pandas import DataFrame\nfrom numpy import array\n\n# return training data\ndef get_train():\n\tseq = [[0.0, 0.1], [0.1, 0.2], [0.2, 0.3], [0.3, 0.4], [0.4, 0.5]]\n\tseq = array(seq)\n\tX, y = seq[:, 0], seq[:, 1]\n\tX = X.reshape((5, 1, 1))\n\treturn X, y\n\n# return validation data\ndef get_val():\n\tseq = [[0.5, 0.6], [0.6, 0.7], [0.7, 0.8], [0.8, 0.9], [0.9, 1.0]]\n\tseq = array(seq)\n\tX, y = seq[:, 0], seq[:, 1]\n\tX = X.reshape((len(X), 1, 1))\n\treturn X, y\n\n# fit an LSTM model\ndef fit_model(n_batch):\n\t# define model\n\tmodel = Sequential()\n\tmodel.add(LSTM(10, input_shape=(1,1)))\n\tmodel.add(Dense(1, activation='linear'))\n\t# compile model\n\tmodel.compile(loss='mse', optimizer='adam')\n\t# fit model\n\tX,y = get_train()\n\tmodel.fit(X, y, epochs=500, shuffle=False, verbose=0, batch_size=n_batch)\n\t# evaluate model\n\tvalX, valY = get_val()\n\tloss = model.evaluate(valX, valY, verbose=0)\n\treturn loss\n\n# define scope of search\nparams = [1, 2, 3]\nn_repeats = 5\n# grid search parameter values\nscores = DataFrame()\nfor value in params:\n\t# repeat each experiment multiple times\n\tloss_values = list()\n\tfor i in range(n_repeats):\n\t\tloss = fit_model(value)\n\t\tloss_values.append(loss)\n\t\tprint('>%d/%d param=%f, loss=%f' % (i+1, n_repeats, value, loss))\n\t# store results for this parameter\n\tscores[str(value)] = loss_values\n# summary statistics of results\nprint(scores.describe())\n# box and whisker plot of results\nscores.boxplot()\npyplot.show()\n"
] | [
[
"numpy.array",
"matplotlib.pyplot.show",
"pandas.DataFrame"
]
] |
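The tuning script in this row boils down to one pattern: repeat each configuration several times, collect losses in a `DataFrame`, then summarize with `describe()` and a box plot. A self-contained sketch of just that pattern, with the Keras model swapped for a noisy stand-in so it runs instantly (the stand-in loss function is an assumption for illustration):

```python
# Repeat-and-summarize grid search, with fit_model stubbed out as noise.
import random
from pandas import DataFrame
from matplotlib import pyplot

def fit_model(n_batch):
    # Stand-in for the real training run: deterministic trend plus noise.
    return 0.01 * n_batch + random.gauss(0, 0.002)

params = [1, 2, 3]
n_repeats = 5
scores = DataFrame()
for value in params:
    scores[str(value)] = [fit_model(value) for _ in range(n_repeats)]

print(scores.describe())   # mean/std of loss per parameter value
scores.boxplot()           # spread of losses across repeats
pyplot.show()
```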
GU-DataLab/fairness-and-missing-values | [
"4b9383d2e383ae49a0cd6c94e3c9cf7c3a584581"
] | [
"env/lib/python3.7/site-packages/art/defences/preprocessor/variance_minimization.py"
] | [
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements the total variance minimization defence `TotalVarMin`.\n\n| Paper link: https://openreview.net/forum?id=SyJ7ClWCb\n\n| Please keep in mind the limitations of defences. For more information on the limitations of this defence,\n see https://arxiv.org/abs/1802.00420 . For details on how to evaluate classifier security in general, see\n https://arxiv.org/abs/1902.06705\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom typing import Optional, Tuple, TYPE_CHECKING\n\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom tqdm import tqdm\n\nfrom art.config import ART_NUMPY_DTYPE\nfrom art.defences.preprocessor.preprocessor import Preprocessor\n\nif TYPE_CHECKING:\n from art.utils import CLIP_VALUES_TYPE\n\nlogger = logging.getLogger(__name__)\n\n\nclass TotalVarMin(Preprocessor):\n \"\"\"\n Implement the total variance minimization defence approach.\n\n | Paper link: https://openreview.net/forum?id=SyJ7ClWCb\n\n | Please keep in mind the limitations of defences. For more information on the limitations of this\n defence, see https://arxiv.org/abs/1802.00420 . 
For details on how to evaluate classifier security in general,\n see https://arxiv.org/abs/1902.06705\n \"\"\"\n\n params = [\"prob\", \"norm\", \"lamb\", \"solver\", \"max_iter\", \"clip_values\", \"verbose\"]\n\n def __init__(\n self,\n prob: float = 0.3,\n norm: int = 2,\n lamb: float = 0.5,\n solver: str = \"L-BFGS-B\",\n max_iter: int = 10,\n clip_values: Optional[\"CLIP_VALUES_TYPE\"] = None,\n apply_fit: bool = False,\n apply_predict: bool = True,\n verbose: bool = False,\n ):\n \"\"\"\n Create an instance of total variance minimization.\n\n :param prob: Probability of the Bernoulli distribution.\n :param norm: The norm (positive integer).\n :param lamb: The lambda parameter in the objective function.\n :param solver: Current support: `L-BFGS-B`, `CG`, `Newton-CG`.\n :param max_iter: Maximum number of iterations when performing optimization.\n :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed\n for features.\n :param apply_fit: True if applied during fitting/training.\n :param apply_predict: True if applied during predicting.\n :param verbose: Show progress bars.\n \"\"\"\n super().__init__(is_fitted=True, apply_fit=apply_fit, apply_predict=apply_predict)\n self.prob = prob\n self.norm = norm\n self.lamb = lamb\n self.solver = solver\n self.max_iter = max_iter\n self.clip_values = clip_values\n self.verbose = verbose\n self._check_params()\n\n def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:\n \"\"\"\n Apply total variance minimization to sample `x`.\n\n :param x: Sample to compress with shape `(batch_size, width, height, depth)`.\n :param y: Labels of the sample `x`. This function does not affect them in any way.\n :return: Similar samples.\n \"\"\"\n if len(x.shape) == 2:\n raise ValueError(\n \"Feature vectors detected. 
Variance minimization can only be applied to data with spatial dimensions.\"\n )\n x_preproc = x.copy()\n\n # Minimize one input at a time\n for i, x_i in enumerate(tqdm(x_preproc, desc=\"Variance minimization\", disable=not self.verbose)):\n mask = (np.random.rand(*x_i.shape) < self.prob).astype(\"int\")\n x_preproc[i] = self._minimize(x_i, mask)\n\n if self.clip_values is not None:\n np.clip(x_preproc, self.clip_values[0], self.clip_values[1], out=x_preproc)\n\n return x_preproc.astype(ART_NUMPY_DTYPE), y\n\n def _minimize(self, x: np.ndarray, mask: np.ndarray) -> np.ndarray:\n \"\"\"\n Minimize the total variance objective function.\n\n :param x: Original image.\n :param mask: A matrix that decides which points are kept.\n :return: A new image.\n \"\"\"\n z_min = x.copy()\n\n for i in range(x.shape[2]):\n res = minimize(\n self._loss_func,\n z_min[:, :, i].flatten(),\n (x[:, :, i], mask[:, :, i], self.norm, self.lamb),\n method=self.solver,\n jac=self._deri_loss_func,\n options={\"maxiter\": self.max_iter},\n )\n z_min[:, :, i] = np.reshape(res.x, z_min[:, :, i].shape)\n\n return z_min\n\n @staticmethod\n def _loss_func(z_init: np.ndarray, x: np.ndarray, mask: np.ndarray, norm: int, lamb: float) -> float:\n \"\"\"\n Loss function to be minimized.\n\n :param z_init: Initial guess.\n :param x: Original image.\n :param mask: A matrix that decides which points are kept.\n :param norm: The norm (positive integer).\n :param lamb: The lambda parameter in the objective function.\n :return: Loss value.\n \"\"\"\n res = np.sqrt(np.power(z_init - x.flatten(), 2).dot(mask.flatten()))\n z_init = np.reshape(z_init, x.shape)\n res += lamb * np.linalg.norm(z_init[1:, :] - z_init[:-1, :], norm, axis=1).sum()\n res += lamb * np.linalg.norm(z_init[:, 1:] - z_init[:, :-1], norm, axis=0).sum()\n\n return res\n\n @staticmethod\n def _deri_loss_func(z_init: np.ndarray, x: np.ndarray, mask: np.ndarray, norm: int, lamb: float) -> float:\n \"\"\"\n Derivative of loss function to be minimized.\n\n :param z_init: Initial guess.\n :param x: Original image.\n :param mask: A matrix that decides which points are kept.\n :param norm: The norm (positive integer).\n :param lamb: The lambda parameter in the objective function.\n :return: Derivative value.\n \"\"\"\n # First compute the derivative of the first component of the loss function\n nor1 = np.sqrt(np.power(z_init - x.flatten(), 2).dot(mask.flatten()))\n if nor1 < 1e-6:\n nor1 = 1e-6\n der1 = ((z_init - x.flatten()) * mask.flatten()) / (nor1 * 1.0)\n\n # Then compute the derivative of the second component of the loss function\n z_init = np.reshape(z_init, x.shape)\n\n if norm == 1:\n z_d1 = np.sign(z_init[1:, :] - z_init[:-1, :])\n z_d2 = np.sign(z_init[:, 1:] - z_init[:, :-1])\n else:\n z_d1_norm = np.power(np.linalg.norm(z_init[1:, :] - z_init[:-1, :], norm, axis=1), norm - 1)\n z_d2_norm = np.power(np.linalg.norm(z_init[:, 1:] - z_init[:, :-1], norm, axis=0), norm - 1)\n z_d1_norm[z_d1_norm < 1e-6] = 1e-6\n z_d2_norm[z_d2_norm < 1e-6] = 1e-6\n z_d1_norm = np.repeat(z_d1_norm[:, np.newaxis], z_init.shape[1], axis=1)\n z_d2_norm = np.repeat(z_d2_norm[np.newaxis, :], z_init.shape[0], axis=0)\n z_d1 = norm * np.power(z_init[1:, :] - z_init[:-1, :], norm - 1) / z_d1_norm\n z_d2 = norm * np.power(z_init[:, 1:] - z_init[:, :-1], norm - 1) / z_d2_norm\n\n der2 = np.zeros(z_init.shape)\n der2[:-1, :] -= z_d1\n der2[1:, :] += z_d1\n der2[:, :-1] -= z_d2\n der2[:, 1:] += z_d2\n der2 = lamb * der2.flatten()\n\n # Total derivative\n return der1 + der2\n\n def 
_check_params(self) -> None:\n if not isinstance(self.prob, (float, int)) or self.prob < 0.0 or self.prob > 1.0:\n logger.error(\"Probability must be between 0 and 1.\")\n raise ValueError(\"Probability must be between 0 and 1.\")\n\n if not isinstance(self.norm, (int, np.int)) or self.norm <= 0:\n logger.error(\"Norm must be a positive integer.\")\n raise ValueError(\"Norm must be a positive integer.\")\n\n if not (self.solver == \"L-BFGS-B\" or self.solver == \"CG\" or self.solver == \"Newton-CG\"):\n logger.error(\"Current support only L-BFGS-B, CG, Newton-CG.\")\n raise ValueError(\"Current support only L-BFGS-B, CG, Newton-CG.\")\n\n if not isinstance(self.max_iter, (int, np.int)) or self.max_iter <= 0:\n logger.error(\"Number of iterations must be a positive integer.\")\n raise ValueError(\"Number of iterations must be a positive integer.\")\n\n if self.clip_values is not None:\n\n if len(self.clip_values) != 2:\n raise ValueError(\"`clip_values` should be a tuple of 2 floats containing the allowed data range.\")\n\n if np.array(self.clip_values[0] >= self.clip_values[1]).any():\n raise ValueError(\"Invalid `clip_values`: min >= max.\")\n\n if not isinstance(self.verbose, bool):\n raise ValueError(\"The argument `verbose` has to be of type bool.\")\n"
] | [
[
"numpy.sign",
"numpy.zeros",
"numpy.reshape",
"numpy.repeat",
"numpy.clip",
"numpy.power",
"numpy.random.rand",
"numpy.array",
"numpy.linalg.norm"
]
] |
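A hedged usage sketch for the `TotalVarMin` defence defined in the row above. The `art.defences.preprocessor` import path is an assumption based on the file's location in the package, and the input shape and parameter values are illustrative:

```python
# Usage sketch for TotalVarMin (shapes and parameters are illustrative).
import numpy as np
from art.defences.preprocessor import TotalVarMin

# __call__ expects (batch_size, width, height, depth); 1-D feature vectors
# are rejected, per the ValueError in the source.
x = np.random.rand(2, 28, 28, 1).astype(np.float32)

tvm = TotalVarMin(prob=0.3, norm=2, lamb=0.5, max_iter=10,
                  clip_values=(0.0, 1.0), verbose=True)
x_cleaned, _ = tvm(x)  # returns (preprocessed samples, unchanged labels)
print(x_cleaned.shape, x_cleaned.dtype)
```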
meta00/vital_sqi | [
"7e64a26c9d56af26bfbd25c3ba30211414f5f845"
] | [
"vital_sqi/sqi/dtw_sqi.py"
] | [
"import numpy as np\nimport sys\nimport os\nif bool(getattr(sys, 'ps1', sys.flags.interactive)):\n old_stdout = sys.stdout\n sys.stdout = open(os.devnull, 'w')\n from dtw import dtw\n sys.stdout = old_stdout\nelse:\n from dtw import dtw\n\nfrom vital_sqi.common.generate_template import (\n ppg_absolute_dual_skewness_template,\n ppg_dual_double_frequency_template,\n ppg_nonlinear_dynamic_system_template,\n ecg_dynamic_template\n )\nfrom vital_sqi.common.utils import check_valid_signal\nfrom scipy.spatial.distance import euclidean\n\ndef compute_dtw_distance(input_sequence, template_sequence):\n dtw_distances = np.ones((len(input_sequence),len(template_sequence))) * np.inf\n #first matching sample is set to zero\n dtw_distances[0,0] = 0\n for i in range(len(input_sequence)):\n for j in range(len(template_sequence)):\n cost = euclidean(input_sequence[i],template_sequence[j])\n # dtw_distances\n\n\n\ndef dtw_sqi(x, template_type=0):\n \"\"\"Using DTW to get the mapping point distance between a signal and its\n template. The DTW SQI is the ratio of the distance sum to\n the trace of cost matrix. The closer to 1 the better SQI.\n\n Parameters\n ----------\n x :\n array_like, signal containing int or float values.\n template_type :\n int,\n 0: ppg_absolute_dual_skewness_template,\n 1: ppg_dual_double_frequency_template,\n 2: ppg_nonlinear_dynamic_system_template,\n 3: ecg_dynamic_template\n default = 0\n\n Returns\n -------\n\n \"\"\"\n check_valid_signal(x)\n if template_type > 3 or type(template_type) != int:\n raise ValueError(\"Invalid template type\")\n if template_type == 0:\n reference = ppg_nonlinear_dynamic_system_template(len(x)).reshape(-1)\n elif template_type == 1:\n reference = ppg_dual_double_frequency_template(len(x))\n if template_type == 2:\n reference = ppg_absolute_dual_skewness_template(len(x))\n if template_type == 3:\n reference = ecg_dynamic_template(len(x))\n alignmentOBE = dtw(x, reference, keep_internals=True,\n step_pattern='asymmetric', open_end=True,\n open_begin=True)\n match_distance = []\n for i in range(len(alignmentOBE.index2)):\n match_distance.append(\n alignmentOBE.costMatrix[i][alignmentOBE.index2[i]]\n )\n trace = alignmentOBE.costMatrix.trace()\n if trace == 0:\n ratio = float(np.log(1))\n else:\n ratio = float(np.log(np.sum(match_distance)/trace))\n return ratio\n"
] | [
[
"numpy.log",
"numpy.sum",
"scipy.spatial.distance.euclidean"
]
] |
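`compute_dtw_distance` in this row is left unfinished: it allocates the distance matrix and computes the local cost, but never fills in the recurrence. A sketch of the standard DTW recurrence it appears to be building toward; this completion is an assumption, not the authors' code:

```python
# Assumed completion of compute_dtw_distance via the classic DTW recurrence.
import numpy as np
from scipy.spatial.distance import euclidean

def compute_dtw_distance(input_sequence, template_sequence):
    n, m = len(input_sequence), len(template_sequence)
    dtw = np.full((n + 1, m + 1), np.inf)
    dtw[0, 0] = 0.0  # first matching sample is set to zero, as in the source
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            # Scalars are wrapped in lists because euclidean expects 1-D input.
            cost = euclidean([input_sequence[i - 1]], [template_sequence[j - 1]])
            # Extend the cheapest of match / insertion / deletion.
            dtw[i, j] = cost + min(dtw[i - 1, j], dtw[i, j - 1], dtw[i - 1, j - 1])
    return dtw[n, m]

print(compute_dtw_distance([0.0, 1.0, 2.0], [0.0, 2.0]))
```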
QianLabUSC/cognitively-enhanced-decision-framework | [
"1797ddd41edcbfbfafca5b599ff7ab70f5fdc37f"
] | [
"rule_based_decision_making.py"
] | [
"# This FILE is part of multi-legged robot field exploration model\r\n# env_wrapper.py - to obtain user interaction data from website\r\n#\r\n# This programm is explained by roboLAND in university of southern california.\r\n# Please notify the source if you use it\r\n# \r\n# Copyright(c) 2021-2025 Ryoma Liu\r\n# Email: [email protected]\r\n\r\nfrom env_wrapper import *\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import optimize\r\nimport random\r\nimport matplotlib.pylab as pylab\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom math import *\r\n\r\nclass rule_state_machine:\r\n def __init__(self):\r\n '''Initial env info and parameters for decision making\r\n '''\r\n self.states = ['Initial', 'Exploration', 'Verification']\r\n self.current_state = 0\r\n self.env = ENV()\r\n self.hypo_locations = (['No','Feature_low','Feature_middle',\r\n 'Feature_high'])\r\n self.hypo_location = 0\r\n self.hypo_samples = (['No','Feature_low', 'Feature_middle',\r\n 'Feature_high'])\r\n self.hypo_sample = 0\r\n self.information_matrix = []\r\n self.accuracy_matrix = []\r\n self.fitting_error_matrix = []\r\n\r\n\r\n def set_init_hypo(self, hypo_location, hypo_sample):\r\n self.hypo_location = hypo_location\r\n self.hypo_sample = hypo_sample\r\n\r\n def choose_initial_template(self):\r\n '''choose initial template\r\n\r\n According to the initial knowledge and hypothesis, human will select a\r\n experience data sample distribution\r\n\r\n Args:\r\n self.hypo_location: inital hypo about data location feature\r\n self.hypo_sample : initial hypo about data sample feature\r\n\r\n Returns:\r\n change the initial template in env wrapper\r\n '''\r\n if(self.hypo_location == 0):\r\n location_index = [1,9,13,21]\r\n elif(self.hypo_location == 1):\r\n location_index = [1,4,7,11,16,21]\r\n elif(self.hypo_location == 2):\r\n location_index = [1,5,9,12,15,21]\r\n elif(self.hypo_location == 3):\r\n location_index = [1,6,11,14,17,20]\r\n if(self.hypo_sample == 0):\r\n sample_index = [3,3,3,3]\r\n elif(self.hypo_sample == 1):\r\n sample_index = [5,5,3,3,3,3]\r\n elif(self.hypo_sample == 2):\r\n sample_index = [3,3,5,5,3,3]\r\n elif(self.hypo_sample == 3):\r\n sample_index = [3,3,3,3,5,5]\r\n initial_action = [location_index, sample_index]\r\n self.env.initiate_template(initial_action)\r\n\r\n def handle_information_coverage(self):\r\n sample_state = self.env.get_state()\r\n sample_loc = np.array(sample_state[0])\r\n sample_number = np.array(sample_state[1])\r\n sort_index = np.argsort(sample_loc)\r\n sample_loc = sample_loc[sort_index]\r\n sample_number = sample_number[sort_index]\r\n unique_index = np.unique(sample_loc, return_index = True)\r\n sample_loc = sample_loc[unique_index[1]]\r\n sample_number = sample_number[unique_index[1]]\r\n sample_state = [sample_loc, sample_number]\r\n\r\n print(sample_state) \r\n self.information_matrix = np.zeros(22) #information matrix in location\r\n self.variable_coverage = np.zeros(20)\r\n for i in range(len(sample_state[0])):\r\n scale = 0.1 * sample_state[1][i] + 1\r\n locs = sample_state[0][i] + 1\r\n self.information_matrix += gauss(locs, scale)\r\n # print(self.information_matrix)\r\n # print(gauss(locs, scale))\r\n # self.plot_line('cool green', np.linspace(1,22,22), gauss(locs, scale), 'test'+str(i))\r\n # print(\"coverage_matrix: \", self.information_matrix)\r\n mm, erodi = self.env.get_data_state()\r\n mm_mean = np.mean(mm, axis=0)\r\n mm_nonzero = mm[np.nonzero(mm)]\r\n mm_mean_nonzero = mm_mean[np.nonzero(mm_mean)]\r\n start = 0 # 区间左端点\r\n 
number_of_interval = 20 # 区间个数\r\n length = 1 # 区间长度\r\n intervals = {'{}~{}'.format(length*x+start, length*(x+1)+start): 0 for x in range(number_of_interval)} # 生成区间\r\n result = np.array(interval_statistics(mm_nonzero, intervals))\r\n self.variable_coverage = len(result[(np.nonzero(result))])/len(result)\r\n result_number = np.linspace(0, 19, 20)\r\n variable_information = np.zeros(20)\r\n for i in range(len(result_number)):\r\n single_converage = gauss_variable(result_number[i] +0.5, result[i])\r\n variable_information += single_converage\r\n # feed the variable coverage into the previous belief\r\n self.variable_information = variable_information\r\n \r\n # print(mm_mean_nonzero)\r\n # print(sample_state[0])\r\n # p , e = optimize.curve_fit(piecewise_linear_moisture, np.array(sample_state[0])+1, mm_mean_nonzero)\r\n # xloc = np.linspace(1, 22, 22)\r\n # xmoisture = piecewise_linear_moisture(xloc, *p)\r\n # self.mapping_value = []\r\n # for emoisture in xmoisture:\r\n # self.mapping_value.append(variable_information[int(emoisture)])\r\n\r\n # print(variable_information)\r\n # print(self.mapping_value)\r\n # plt.plot(xloc,xmoisture )\r\n \r\n # plt.show()\r\n\r\n\r\n def handle_information_accuracy(self):\r\n accuracy_matrix = []\r\n mm, data_state = self.env.get_data_state()\r\n loc_state = self.env.get_state()\r\n # error_cost = np.std(data_state, axis=0)\r\n for col in range(data_state.shape[1]): \r\n if col in loc_state[0]:\r\n effective_data = data_state[:,col][np.nonzero(data_state[:,col])]\r\n # print(effective_data)\r\n median = np.median(effective_data) \r\n k1 = 1.4826\r\n mad = k1 * np.median(np.abs(effective_data-median))\r\n lower_limit = median - (3*mad)\r\n upper_limit = median + (3*mad)\r\n outlier_data_num = (len(effective_data[(effective_data> \r\n upper_limit) & (effective_data<lower_limit)]))\r\n data_samples = len(effective_data)\r\n if(data_samples == 0):\r\n total_cost = 0\r\n elif(data_samples > 0):\r\n total_cost = 1 - 1/(1+ (data_samples - 0.99)/(3*outlier_data_num + 1))\r\n accuracy_matrix.append(total_cost)\r\n else:\r\n accuracy_matrix.append(0)\r\n self.accuracy_matrix = accuracy_matrix\r\n # print('accuracy_matrix: ', self.accuracy_matrix)\r\n\r\n\r\n def handle_feature_point_detection(self):\r\n loc_state = self.env.get_state()[0]\r\n #print(self.env.get_state())\r\n self.fitting_error_matrix = np.zeros(22)\r\n mm, erodi = self.env.get_data_state()\r\n mm_mean = np.mean(mm, axis=0)\r\n mm_nonzeroindex = (mm_mean != 0)\r\n erodi_mean = np.mean(erodi, axis=0)\r\n self.loc_index = np.linspace(1,22,22)[mm_nonzeroindex]\r\n data_index = mm_mean[mm_nonzeroindex]\r\n data_mean = erodi_mean[mm_nonzeroindex]\r\n p , e = optimize.curve_fit(piecewise_linear, data_index, data_mean)\r\n # print('dfadfaaf', p)\r\n xd = np.linspace(0, np.max(data_index), 22)\r\n fit_curve = piecewise_linear(xd, *p)\r\n fitting_results = piecewise_linear(data_index, *p)\r\n self.fitting_results = fitting_results\r\n fitting_error = fitting_results - data_mean\r\n mm_mean[mm_nonzeroindex] = fitting_error\r\n self.data_index = data_index\r\n self.fitting_error_matrix[mm_nonzeroindex] = fitting_error\r\n\r\n # print(data_mean)\r\n nonzero_data_mean = data_mean[np.nonzero(data_mean != 0)]\r\n rmse_data = (sqrt(np.sum(np.power(nonzero_data_mean, 2))/\r\n np.size(nonzero_data_mean)))\r\n # print(rmse_data)\r\n self.rmse_data = rmse_data\r\n # plt.plot(xd, fit_curve)\r\n # plt.plot(data_index, data_mean, \"o\")\r\n # plt.plot(data_index, fitting_results, \"*\")\r\n # #plt.plot(data_index, 
fitting_error, \"+\")\r\n # plt.show()\r\n # plt.savefig('123.png')\r\n\r\n\r\n # find the feature point location\r\n array = np.asarray(data_index)\r\n idx = (np.abs(array - p[0])).argmin()\r\n loc_indx = loc_state[idx]\r\n saturation_estimated = int(loc_indx * (p[0]/array[idx]))\r\n self.saturation_selection = np.arange(saturation_estimated - 2, saturation_estimated + 3, 1)\r\n \r\n\r\n def confidence_model(self):\r\n non_zero_matrix = (self.fitting_error_matrix[np.nonzero\r\n (self.fitting_error_matrix != 0)])\r\n rmse = (sqrt(np.sum(np.power(non_zero_matrix, 2))/\r\n np.size(non_zero_matrix)))\r\n # print(rmse)\r\n # print(self.fitting_error_matrix)\r\n # print(non_zero_matrix)\r\n whole_rmse_percentage = rmse/self.rmse_data\r\n # print(whole_rmse_percentage)\r\n confindence = (0.04 - whole_rmse_percentage) * 30 * self.coverage_criteria\r\n # print(confindence)\r\n\r\n def handle_state_judge(self):\r\n if(self.current_state == 0):\r\n self.current_state = 1\r\n elif(self.current_state == 1):\r\n if(np.min(self.accuracy_matrix) > 0.7 and \r\n len(self.information_matrix[self.information_matrix > 0.8]) > 15):\r\n self.current_state = 2\r\n else: \r\n self.current_state = 1\r\n elif(self.current_state == 2):\r\n if(len(self.fitting_error_matrix[self.fitting_error_matrix > 0.8]) > 0):\r\n self.current_state = 1\r\n elif():\r\n self.current_state = 2\r\n \r\n def information_model(self):\r\n self.coverage_criteria = (len(self.information_matrix[self.information_matrix\r\n > 0.3]) / 22)\r\n accuracy_matrix = np.array(self.accuracy_matrix)\r\n # print(accuracy_matrix)\r\n self.accuracy_criteria = (len(accuracy_matrix[(accuracy_matrix > 0.6) & (accuracy_matrix != 0)]) /\r\n len(accuracy_matrix[accuracy_matrix != 0]))\r\n \r\n # print('accuracy_value:', self.accuracy_criteria) # percentage of locs which the accuracy is lower than 0.6\r\n # print('coverage_value:', self.coverage_criteria) # percentage of locs which the information is lower than 0.8\r\n \r\n\r\n def take_action(self):\r\n if(self.current_state == 0):\r\n self.choose_initial_template()\r\n elif(self.current_state == 1):\r\n action_loc = np.argmin(self.information_matrix)\r\n self.env.set_action([action_loc],[3])\r\n accuracy_loc = np.where(self.accuracy_matrix < 0.7)\r\n accuracy_samples = np.ones(len(accuracy_loc))\r\n self.env.set_action(accuracy_loc,accuracy_samples) \r\n elif(self.current_state == 2):\r\n fitting_error_loc = np.where(self.fitting_error_matrix > 0.8)\r\n add_loc = []\r\n add_samples = []\r\n current_state = self.env.get_state()\r\n for i in fitting_error_loc:\r\n if not i+1 in current_state[0]:\r\n add_loc.append(i+1)\r\n add_samples.append(3)\r\n if not i-1 in current_state[0]:\r\n add_loc.append(i-1)\r\n add_samples.append(3)\r\n self.env.set_action(add_loc, add_samples)\r\n\r\n def plot(self, color, name):\r\n myparams = {\r\n\r\n 'axes.labelsize': '10',\r\n\r\n 'xtick.labelsize': '10',\r\n\r\n 'ytick.labelsize': '10',\r\n\r\n 'lines.linewidth': 1,\r\n\r\n 'legend.fontsize': '3',\r\n\r\n 'font.family': 'Times New Roman',\r\n\r\n 'figure.figsize': '9, 5' #图片尺寸\r\n\r\n }\r\n\r\n pylab.rcParams.update(myparams) #更新自己的设置\r\n # line_styles=['ro-','b^-','gs-','ro--','b^--','gs--'] #线型设置\r\n \r\n fig1 = plt.figure(1)\r\n a = plt.plot(self.coverage_criteria, self.accuracy_criteria ,marker='o', color=sns.xkcd_rgb[color],\r\n markersize=5)\r\n \r\n plt.legend(loc=\"lower right\") #图例位置 右下角\r\n plt.ylabel('accuracy') \r\n plt.xlabel('coverage ') \r\n plt.xlim((0, 1.1))\r\n plt.ylim((0, 1.1))\r\n plt.axvline(x=1, 
c=\"b\", ls=\"--\", lw=1)\r\n plt.axhline(y=1, c=\"b\", ls=\"--\", lw=1)\r\n plt.savefig(name)\r\n\r\n #注意.show()操作后会默认打开一个空白fig,此时保存,容易出现保存的为纯白背景,所以请在show()操作前保存fig.\r\n # plt.show()\r\n\r\ndef interval_statistics(data, intervals):\r\n if len(data) == 0:\r\n return\r\n for num in data:\r\n for interval in intervals:\r\n lr = tuple(interval.split('~'))\r\n left, right = float(lr[0]), float(lr[1])\r\n if left <= num <= right:\r\n intervals[interval] += 1\r\n results = []\r\n for key, value in intervals.items():\r\n #print(\"%10s\" % key, end='') # 借助 end=''可以不换行\r\n # print(\"%10s\" % value, end='') # \"%10s\" 右对齐\r\n #print('%16s' % '{:.3%}'.format(value * 1.0 / len(data)))\r\n results.append(value)\r\n return results\r\n\r\n\r\ndef piecewise_linear(x, x0, y0, k1):\r\n\t# x<x0 ⇒ lambda x: k1*x + y0 - k1*x0\r\n\t# x>=x0 ⇒ lambda x: k2*x + y0 - k2*x0\r\n return np.piecewise(x, [x < x0, x >= x0], [lambda x:k1*x + y0-k1*x0, \r\n lambda x: y0])\r\ndef piecewise_linear_moisture(x, x0, y0, k1, k2):\r\n\t# x<x0 ⇒ lambda x: k1*x + y0 - k1*x0\r\n\t# x>=x0 ⇒ lambda x: k2*x + y0 - k2*x0\r\n return np.piecewise(x, [x < x0, x >= x0], [lambda x:k1*x + y0-k1*x0, \r\n lambda x: k2*x + y0 - k2*x0])\r\ndef gauss(mean, scale, x=np.linspace(1,22,22), sigma=1):\r\n return scale * np.exp(-np.square(x - mean) / (2 * sigma ** 2))\r\ndef gauss_variable(mean, scale, x=np.linspace(0,19,20), sigma=1):\r\n return scale * np.exp(-np.square(x - mean) / (2 * sigma ** 2))\r\nif __name__ == \"__main__\":\r\n DM = rule_state_machine()\r\n DM.choose_initial_template()\r\n # x = np.linspace(1,22,22)\r\n # information_matrix = gauss(1,0.1).reshape(22,1)\r\n # print(information_matrix)\r\n # sns.set()\r\n # ax = sns.heatmap(information_matrix, vmin=0, vmax=1)\r\n # plt.title('Information Matrix')\r\n # plt.savefig(\"test.png\") \r\n DM.handle_information_accuracy()\r\n DM.handle_information_coverage()\r\n DM.information_model()\r\n DM.plot('cool green','test') \r\n DM.handle_feature_point_detection()\r\n DM.confidence_model()\r\n "
] | [
[
"scipy.optimize.curve_fit",
"numpy.argsort",
"numpy.asarray",
"numpy.size",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pylab.rcParams.update",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.argmin",
"numpy.abs",
"matplotlib.pyplot.xlim",
"numpy.where",
"numpy.linspace",
"numpy.unique",
"numpy.mean",
"numpy.nonzero",
"matplotlib.pyplot.axvline",
"numpy.zeros",
"matplotlib.pyplot.axhline",
"numpy.median",
"numpy.arange",
"numpy.max",
"numpy.power",
"numpy.min",
"matplotlib.pyplot.ylim",
"numpy.square",
"matplotlib.pyplot.legend",
"numpy.piecewise",
"numpy.array",
"matplotlib.pyplot.xlabel"
]
] |
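The decision framework's `handle_information_coverage` accumulates one Gaussian bump per sampled location, scaled by the number of samples taken there, then thresholds the resulting matrix to get a coverage score. A self-contained sketch of that idea, reusing the row's own `gauss` helper (the example locations and sample counts are made up):

```python
# Gaussian information-coverage sketch; locations/counts are illustrative.
import numpy as np

def gauss(mean, scale, x=np.linspace(1, 22, 22), sigma=1):
    return scale * np.exp(-np.square(x - mean) / (2 * sigma ** 2))

sample_locs = np.array([1, 9, 13, 21])   # sampled positions (1-based)
sample_nums = np.array([3, 3, 5, 3])     # measurements per position

information_matrix = np.zeros(22)
for loc, num in zip(sample_locs, sample_nums):
    scale = 0.1 * num + 1                # more samples -> taller bump
    information_matrix += gauss(loc, scale)

# Same thresholding as information_model(): fraction of well-covered locations.
coverage = len(information_matrix[information_matrix > 0.3]) / 22
print(round(coverage, 3))
```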
GT-AcerZhang/yolov3.insects_challenge | [
"1ac6ee5a8a5c534ec11723542f4c10583935a2ad"
] | [
"train.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport time\nimport os\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.dygraph.base import to_variable\n\nfrom reader import data_loader, test_data_loader, multithread_loader\nfrom yolov3 import YOLOv3\n\n# train.py\n# 提升点: 可以改变anchor的大小,注意训练和测试时要使用同样的anchor\nANCHORS = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]\n\nANCHOR_MASKS = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]\n\nIGNORE_THRESH = .7\nNUM_CLASSES = 7\n\nTRAINDIR = 'insects/train'\nVALIDDIR = 'insects/val'\n\n# train.py\nif __name__ == '__main__':\n with fluid.dygraph.guard():\n model = YOLOv3('yolov3', num_classes = NUM_CLASSES, is_train=True)\n opt = fluid.optimizer.Momentum(\n learning_rate=0.001, #提升点:可以调整学习率,或者设置学习率衰减\n momentum=0.9) # 提升点: 可以添加正则化项\n\n train_loader = multithread_loader(TRAINDIR, batch_size= 10, mode='train')\n valid_loader = multithread_loader(VALIDDIR, batch_size= 10, mode='valid')\n\n MAX_EPOCH = 100 # 提升点: 可以改变训练的轮数\n for epoch in range(MAX_EPOCH):\n for i, data in enumerate(train_loader()):\n img, gt_boxes, gt_labels, img_scale = data\n gt_scores = np.ones(gt_labels.shape).astype('float32')\n gt_scores = to_variable(gt_scores)\n img = to_variable(img)\n gt_boxes = to_variable(gt_boxes)\n gt_labels = to_variable(gt_labels)\n outputs = model(img)\n loss = model.get_loss(outputs, gt_boxes, gt_labels, gtscore=gt_scores,\n anchors = ANCHORS,\n anchor_masks = ANCHOR_MASKS,\n ignore_thresh=IGNORE_THRESH,\n use_label_smooth=False)\n\n loss.backward()\n opt.minimize(loss)\n model.clear_gradients()\n if i % 1 == 0:\n timestring = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(time.time()))\n print('{}[TRAIN]epoch {}, iter {}, output loss: {}'.format(timestring, epoch, i, loss.numpy()))\n\n # save params of model\n if (epoch % 5 == 0) or (epoch == MAX_EPOCH -1):\n fluid.save_dygraph(model.state_dict(), 'yolo_epoch{}'.format(epoch))\n \n # 每个epoch结束之后在验证集上进行测试\n model.eval()\n for i, data in enumerate(valid_loader()):\n img, gt_boxes, gt_labels, img_scale = data\n gt_scores = np.ones(gt_labels.shape).astype('float32')\n gt_scores = to_variable(gt_scores)\n img = to_variable(img)\n gt_boxes = to_variable(gt_boxes)\n gt_labels = to_variable(gt_labels)\n outputs = model(img)\n loss = model.get_loss(outputs, gt_boxes, gt_labels, gtscore=gt_scores,\n anchors = ANCHORS,\n anchor_masks = ANCHOR_MASKS,\n ignore_thresh=IGNORE_THRESH,\n use_label_smooth=False)\n if i % 1 == 0:\n timestring = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(time.time()))\n print('{}[VALID]epoch {}, iter {}, output loss: {}'.format(timestring, epoch, i, loss.numpy()))\n model.train()\n\n\n"
] | [
[
"numpy.ones"
]
] |
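A minimal PaddlePaddle 1.x dygraph loop mirroring the structure of `train.py` above (guard, `to_variable`, `backward`, `minimize`, `clear_gradients`), with a toy `Linear` regressor standing in for the YOLOv3 model. The stand-in model and the `parameter_list` argument are assumptions; whether `parameter_list` is required varies across Paddle 1.x versions:

```python
# Dygraph training-loop skeleton; the Linear model is a stand-in for YOLOv3.
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable

with fluid.dygraph.guard():
    model = fluid.dygraph.Linear(4, 1)
    opt = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9,
                                   parameter_list=model.parameters())
    for step in range(3):
        x = to_variable(np.random.rand(10, 4).astype('float32'))
        y = to_variable(np.ones((10, 1), dtype='float32'))  # np.ones, as above
        loss = fluid.layers.reduce_mean(fluid.layers.square(model(x) - y))
        loss.backward()
        opt.minimize(loss)
        model.clear_gradients()
        print(step, loss.numpy())
```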
ShanuDey/tf-slim | [
"19c840abfa6de567d760254c42ea68760cf5d9f0"
] | [
"tf_slim/nets/vgg_test.py"
] | [
"# coding=utf-8\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for slim.nets.vgg.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tf_slim.nets import vgg\nfrom tf_slim.ops import variables as variables_lib\n# pylint:disable=g-direct-tensorflow-import\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n# pylint:enable=g-direct-tensorflow-import\n\n\nclass VGGATest(test.TestCase):\n\n def testBuild(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_a(inputs, num_classes)\n self.assertEquals(logits.op.name, 'vgg_a/fc8/squeezed')\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n\n def testFullyConvolutional(self):\n batch_size = 1\n height, width = 256, 256\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False)\n self.assertEquals(logits.op.name, 'vgg_a/fc8/BiasAdd')\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, 2, 2, num_classes])\n\n def testEndPoints(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n for is_training in [True, False]:\n with ops.Graph().as_default():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n _, end_points = vgg.vgg_a(inputs, num_classes, is_training=is_training)\n expected_names = [\n 'vgg_a/conv1/conv1_1', 'vgg_a/pool1', 'vgg_a/conv2/conv2_1',\n 'vgg_a/pool2', 'vgg_a/conv3/conv3_1', 'vgg_a/conv3/conv3_2',\n 'vgg_a/pool3', 'vgg_a/conv4/conv4_1', 'vgg_a/conv4/conv4_2',\n 'vgg_a/pool4', 'vgg_a/conv5/conv5_1', 'vgg_a/conv5/conv5_2',\n 'vgg_a/pool5', 'vgg_a/fc6', 'vgg_a/fc7', 'vgg_a/fc8'\n ]\n self.assertSetEqual(set(end_points.keys()), set(expected_names))\n\n def testModelVariables(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n vgg.vgg_a(inputs, num_classes)\n expected_names = [\n 'vgg_a/conv1/conv1_1/weights',\n 'vgg_a/conv1/conv1_1/biases',\n 'vgg_a/conv2/conv2_1/weights',\n 'vgg_a/conv2/conv2_1/biases',\n 'vgg_a/conv3/conv3_1/weights',\n 'vgg_a/conv3/conv3_1/biases',\n 'vgg_a/conv3/conv3_2/weights',\n 'vgg_a/conv3/conv3_2/biases',\n 'vgg_a/conv4/conv4_1/weights',\n 'vgg_a/conv4/conv4_1/biases',\n 'vgg_a/conv4/conv4_2/weights',\n 'vgg_a/conv4/conv4_2/biases',\n 'vgg_a/conv5/conv5_1/weights',\n 
'vgg_a/conv5/conv5_1/biases',\n 'vgg_a/conv5/conv5_2/weights',\n 'vgg_a/conv5/conv5_2/biases',\n 'vgg_a/fc6/weights',\n 'vgg_a/fc6/biases',\n 'vgg_a/fc7/weights',\n 'vgg_a/fc7/biases',\n 'vgg_a/fc8/weights',\n 'vgg_a/fc8/biases',\n ]\n model_variables = [v.op.name for v in variables_lib.get_model_variables()]\n self.assertSetEqual(set(model_variables), set(expected_names))\n\n def testEvaluation(self):\n batch_size = 2\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_a(eval_inputs, is_training=False)\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n predictions = math_ops.argmax(logits, 1)\n self.assertListEqual(predictions.get_shape().as_list(), [batch_size])\n\n def testTrainEvalWithReuse(self):\n train_batch_size = 2\n eval_batch_size = 1\n train_height, train_width = 224, 224\n eval_height, eval_width = 256, 256\n num_classes = 1000\n with self.cached_session():\n train_inputs = random_ops.random_uniform(\n (train_batch_size, train_height, train_width, 3))\n logits, _ = vgg.vgg_a(train_inputs)\n self.assertListEqual(logits.get_shape().as_list(),\n [train_batch_size, num_classes])\n variable_scope.get_variable_scope().reuse_variables()\n eval_inputs = random_ops.random_uniform(\n (eval_batch_size, eval_height, eval_width, 3))\n logits, _ = vgg.vgg_a(\n eval_inputs, is_training=False, spatial_squeeze=False)\n self.assertListEqual(logits.get_shape().as_list(),\n [eval_batch_size, 2, 2, num_classes])\n logits = math_ops.reduce_mean(logits, [1, 2])\n predictions = math_ops.argmax(logits, 1)\n self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])\n\n def testForward(self):\n batch_size = 1\n height, width = 224, 224\n with self.cached_session() as sess:\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_a(inputs)\n sess.run(variables.global_variables_initializer())\n output = sess.run(logits)\n self.assertTrue(output.any())\n\n\nclass VGG16Test(test.TestCase):\n\n def testBuild(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_16(inputs, num_classes)\n self.assertEquals(logits.op.name, 'vgg_16/fc8/squeezed')\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n\n def testFullyConvolutional(self):\n batch_size = 1\n height, width = 256, 256\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_16(inputs, num_classes, spatial_squeeze=False)\n self.assertEquals(logits.op.name, 'vgg_16/fc8/BiasAdd')\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, 2, 2, num_classes])\n\n def testEndPoints(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n for is_training in [True, False]:\n with ops.Graph().as_default():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n _, end_points = vgg.vgg_16(inputs, num_classes, is_training=is_training)\n expected_names = [\n 'vgg_16/conv1/conv1_1', 'vgg_16/conv1/conv1_2', 'vgg_16/pool1',\n 'vgg_16/conv2/conv2_1', 'vgg_16/conv2/conv2_2', 'vgg_16/pool2',\n 'vgg_16/conv3/conv3_1', 'vgg_16/conv3/conv3_2',\n 'vgg_16/conv3/conv3_3', 'vgg_16/pool3', 'vgg_16/conv4/conv4_1',\n 'vgg_16/conv4/conv4_2', 'vgg_16/conv4/conv4_3', 'vgg_16/pool4',\n 
'vgg_16/conv5/conv5_1', 'vgg_16/conv5/conv5_2',\n 'vgg_16/conv5/conv5_3', 'vgg_16/pool5', 'vgg_16/fc6', 'vgg_16/fc7',\n 'vgg_16/fc8'\n ]\n self.assertSetEqual(set(end_points.keys()), set(expected_names))\n\n def testModelVariables(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n vgg.vgg_16(inputs, num_classes)\n expected_names = [\n 'vgg_16/conv1/conv1_1/weights',\n 'vgg_16/conv1/conv1_1/biases',\n 'vgg_16/conv1/conv1_2/weights',\n 'vgg_16/conv1/conv1_2/biases',\n 'vgg_16/conv2/conv2_1/weights',\n 'vgg_16/conv2/conv2_1/biases',\n 'vgg_16/conv2/conv2_2/weights',\n 'vgg_16/conv2/conv2_2/biases',\n 'vgg_16/conv3/conv3_1/weights',\n 'vgg_16/conv3/conv3_1/biases',\n 'vgg_16/conv3/conv3_2/weights',\n 'vgg_16/conv3/conv3_2/biases',\n 'vgg_16/conv3/conv3_3/weights',\n 'vgg_16/conv3/conv3_3/biases',\n 'vgg_16/conv4/conv4_1/weights',\n 'vgg_16/conv4/conv4_1/biases',\n 'vgg_16/conv4/conv4_2/weights',\n 'vgg_16/conv4/conv4_2/biases',\n 'vgg_16/conv4/conv4_3/weights',\n 'vgg_16/conv4/conv4_3/biases',\n 'vgg_16/conv5/conv5_1/weights',\n 'vgg_16/conv5/conv5_1/biases',\n 'vgg_16/conv5/conv5_2/weights',\n 'vgg_16/conv5/conv5_2/biases',\n 'vgg_16/conv5/conv5_3/weights',\n 'vgg_16/conv5/conv5_3/biases',\n 'vgg_16/fc6/weights',\n 'vgg_16/fc6/biases',\n 'vgg_16/fc7/weights',\n 'vgg_16/fc7/biases',\n 'vgg_16/fc8/weights',\n 'vgg_16/fc8/biases',\n ]\n model_variables = [v.op.name for v in variables_lib.get_model_variables()]\n self.assertSetEqual(set(model_variables), set(expected_names))\n\n def testEvaluation(self):\n batch_size = 2\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_16(eval_inputs, is_training=False)\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n predictions = math_ops.argmax(logits, 1)\n self.assertListEqual(predictions.get_shape().as_list(), [batch_size])\n\n def testTrainEvalWithReuse(self):\n train_batch_size = 2\n eval_batch_size = 1\n train_height, train_width = 224, 224\n eval_height, eval_width = 256, 256\n num_classes = 1000\n with self.cached_session():\n train_inputs = random_ops.random_uniform(\n (train_batch_size, train_height, train_width, 3))\n logits, _ = vgg.vgg_16(train_inputs)\n self.assertListEqual(logits.get_shape().as_list(),\n [train_batch_size, num_classes])\n variable_scope.get_variable_scope().reuse_variables()\n eval_inputs = random_ops.random_uniform(\n (eval_batch_size, eval_height, eval_width, 3))\n logits, _ = vgg.vgg_16(\n eval_inputs, is_training=False, spatial_squeeze=False)\n self.assertListEqual(logits.get_shape().as_list(),\n [eval_batch_size, 2, 2, num_classes])\n logits = math_ops.reduce_mean(logits, [1, 2])\n predictions = math_ops.argmax(logits, 1)\n self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])\n\n def testForward(self):\n batch_size = 1\n height, width = 224, 224\n with self.cached_session() as sess:\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_16(inputs)\n sess.run(variables.global_variables_initializer())\n output = sess.run(logits)\n self.assertTrue(output.any())\n\n\nclass VGG19Test(test.TestCase):\n\n def testBuild(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = 
vgg.vgg_19(inputs, num_classes)\n self.assertEquals(logits.op.name, 'vgg_19/fc8/squeezed')\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n\n def testFullyConvolutional(self):\n batch_size = 1\n height, width = 256, 256\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_19(inputs, num_classes, spatial_squeeze=False)\n self.assertEquals(logits.op.name, 'vgg_19/fc8/BiasAdd')\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, 2, 2, num_classes])\n\n def testEndPoints(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n for is_training in [True, False]:\n with ops.Graph().as_default():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n _, end_points = vgg.vgg_19(inputs, num_classes, is_training=is_training)\n expected_names = [\n 'vgg_19/conv1/conv1_1', 'vgg_19/conv1/conv1_2', 'vgg_19/pool1',\n 'vgg_19/conv2/conv2_1', 'vgg_19/conv2/conv2_2', 'vgg_19/pool2',\n 'vgg_19/conv3/conv3_1', 'vgg_19/conv3/conv3_2',\n 'vgg_19/conv3/conv3_3', 'vgg_19/conv3/conv3_4', 'vgg_19/pool3',\n 'vgg_19/conv4/conv4_1', 'vgg_19/conv4/conv4_2',\n 'vgg_19/conv4/conv4_3', 'vgg_19/conv4/conv4_4', 'vgg_19/pool4',\n 'vgg_19/conv5/conv5_1', 'vgg_19/conv5/conv5_2',\n 'vgg_19/conv5/conv5_3', 'vgg_19/conv5/conv5_4', 'vgg_19/pool5',\n 'vgg_19/fc6', 'vgg_19/fc7', 'vgg_19/fc8'\n ]\n self.assertSetEqual(set(end_points.keys()), set(expected_names))\n\n def testModelVariables(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n vgg.vgg_19(inputs, num_classes)\n expected_names = [\n 'vgg_19/conv1/conv1_1/weights',\n 'vgg_19/conv1/conv1_1/biases',\n 'vgg_19/conv1/conv1_2/weights',\n 'vgg_19/conv1/conv1_2/biases',\n 'vgg_19/conv2/conv2_1/weights',\n 'vgg_19/conv2/conv2_1/biases',\n 'vgg_19/conv2/conv2_2/weights',\n 'vgg_19/conv2/conv2_2/biases',\n 'vgg_19/conv3/conv3_1/weights',\n 'vgg_19/conv3/conv3_1/biases',\n 'vgg_19/conv3/conv3_2/weights',\n 'vgg_19/conv3/conv3_2/biases',\n 'vgg_19/conv3/conv3_3/weights',\n 'vgg_19/conv3/conv3_3/biases',\n 'vgg_19/conv3/conv3_4/weights',\n 'vgg_19/conv3/conv3_4/biases',\n 'vgg_19/conv4/conv4_1/weights',\n 'vgg_19/conv4/conv4_1/biases',\n 'vgg_19/conv4/conv4_2/weights',\n 'vgg_19/conv4/conv4_2/biases',\n 'vgg_19/conv4/conv4_3/weights',\n 'vgg_19/conv4/conv4_3/biases',\n 'vgg_19/conv4/conv4_4/weights',\n 'vgg_19/conv4/conv4_4/biases',\n 'vgg_19/conv5/conv5_1/weights',\n 'vgg_19/conv5/conv5_1/biases',\n 'vgg_19/conv5/conv5_2/weights',\n 'vgg_19/conv5/conv5_2/biases',\n 'vgg_19/conv5/conv5_3/weights',\n 'vgg_19/conv5/conv5_3/biases',\n 'vgg_19/conv5/conv5_4/weights',\n 'vgg_19/conv5/conv5_4/biases',\n 'vgg_19/fc6/weights',\n 'vgg_19/fc6/biases',\n 'vgg_19/fc7/weights',\n 'vgg_19/fc7/biases',\n 'vgg_19/fc8/weights',\n 'vgg_19/fc8/biases',\n ]\n model_variables = [v.op.name for v in variables_lib.get_model_variables()]\n self.assertSetEqual(set(model_variables), set(expected_names))\n\n def testEvaluation(self):\n batch_size = 2\n height, width = 224, 224\n num_classes = 1000\n with self.cached_session():\n eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_19(eval_inputs, is_training=False)\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n predictions = math_ops.argmax(logits, 1)\n 
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])\n\n def testTrainEvalWithReuse(self):\n train_batch_size = 2\n eval_batch_size = 1\n train_height, train_width = 224, 224\n eval_height, eval_width = 256, 256\n num_classes = 1000\n with self.cached_session():\n train_inputs = random_ops.random_uniform(\n (train_batch_size, train_height, train_width, 3))\n logits, _ = vgg.vgg_19(train_inputs)\n self.assertListEqual(logits.get_shape().as_list(),\n [train_batch_size, num_classes])\n variable_scope.get_variable_scope().reuse_variables()\n eval_inputs = random_ops.random_uniform(\n (eval_batch_size, eval_height, eval_width, 3))\n logits, _ = vgg.vgg_19(\n eval_inputs, is_training=False, spatial_squeeze=False)\n self.assertListEqual(logits.get_shape().as_list(),\n [eval_batch_size, 2, 2, num_classes])\n logits = math_ops.reduce_mean(logits, [1, 2])\n predictions = math_ops.argmax(logits, 1)\n self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])\n\n def testForward(self):\n batch_size = 1\n height, width = 224, 224\n with self.cached_session() as sess:\n inputs = random_ops.random_uniform((batch_size, height, width, 3))\n logits, _ = vgg.vgg_19(inputs)\n sess.run(variables.global_variables_initializer())\n output = sess.run(logits)\n self.assertTrue(output.any())\n\n\nif __name__ == '__main__':\n test.main()\n"
] | [
[
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.ops.math_ops.argmax",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.python.ops.variable_scope.get_variable_scope"
]
] |
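The test file above repeats one pattern per network: build the graph on random inputs, then assert output shapes and end-point names. A condensed sketch of that smoke-test pattern for `vgg_a`, using only calls that appear in the row itself:

```python
# Condensed shape/end-point smoke test in the style of VGGATest.testBuild.
from tf_slim.nets import vgg
from tensorflow.python.framework import ops
from tensorflow.python.ops import random_ops

with ops.Graph().as_default():
    inputs = random_ops.random_uniform((2, 224, 224, 3))  # NHWC batch of 2
    logits, end_points = vgg.vgg_a(inputs, num_classes=1000)
    assert logits.get_shape().as_list() == [2, 1000]
    assert 'vgg_a/fc8' in end_points
```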
pedrob37/PhysicsPyTorch | [
"a892dfe89740b6fa75d3de5319f99d41bcf4ca63"
] | [
"ponai/data/grid_dataset.py"
] | [
"# Copyright 2020 ponai Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nfrom typing import Union\n\nimport torch\nfrom torch.utils.data import IterableDataset\n\nfrom ponai.data.utils import iter_patch\nfrom ponai.utils import NumpyPadMode\n\n\nclass GridPatchDataset(IterableDataset):\n \"\"\"\n Yields patches from arrays read from an input dataset. The patches are chosen in a contiguous grid sampling scheme.\n \"\"\"\n\n def __init__(\n self, dataset, patch_size, start_pos=(), mode: Union[NumpyPadMode, str] = NumpyPadMode.WRAP, **pad_opts\n ):\n \"\"\"\n Initializes this dataset in terms of the input dataset and patch size. The `patch_size` is the size of the\n patch to sample from the input arrays. It is assumed the arrays first dimension is the channel dimension which\n will be yielded in its entirety so this should not be specified in `patch_size`. For example, for an input 3D\n array with 1 channel of size (1, 20, 20, 20) a regular grid sampling of eight patches (1, 10, 10, 10) would be\n specified by a `patch_size` of (10, 10, 10).\n\n Args:\n dataset (Dataset): the dataset to read array data from\n patch_size (tuple of int or None): size of patches to generate slices for, 0/None selects whole dimension\n start_pos (tuple of it, optional): starting position in the array, default is 0 for each dimension\n mode: {``\"constant\"``, ``\"edge\"``, ``\"linear_ramp\"``, ``\"maximum\"``, ``\"mean\"``,\n ``\"median\"``, ``\"minimum\"``, ``\"reflect\"``, ``\"symmetric\"``, ``\"wrap\"``, ``\"empty\"``}\n One of the listed string values or a user supplied function. Defaults to ``\"wrap\"``.\n See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html\n pad_opts (dict, optional): padding options, see numpy.pad\n \"\"\"\n\n self.dataset = dataset\n self.patch_size = (None,) + tuple(patch_size)\n self.start_pos = start_pos\n self.mode: NumpyPadMode = NumpyPadMode(mode)\n self.pad_opts = pad_opts\n\n def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n iter_start = 0\n iter_end = len(self.dataset)\n\n if worker_info is not None:\n # split workload\n per_worker = int(math.ceil((iter_end - iter_start) / float(worker_info.num_workers)))\n worker_id = worker_info.id\n iter_start = iter_start + worker_id * per_worker\n iter_end = min(iter_start + per_worker, iter_end)\n\n for index in range(iter_start, iter_end):\n arrays = self.dataset[index]\n\n iters = [iter_patch(a, self.patch_size, self.start_pos, False, self.mode, **self.pad_opts) for a in arrays]\n\n yield from zip(*iters)\n"
] | [
[
"torch.utils.data.get_worker_info"
]
] |
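A hedged usage sketch for `GridPatchDataset`, following its docstring's example of one `(1, 20, 20, 20)` array yielding eight `(1, 10, 10, 10)` patches. Wrapping each sample in a one-element tuple is an assumption that matches `__iter__`, which treats `dataset[index]` as a sequence of arrays and zips one patch iterator per array:

```python
# GridPatchDataset usage sketch; one sample, one array per sample.
import numpy as np
from ponai.data.grid_dataset import GridPatchDataset

images = [(np.zeros((1, 20, 20, 20), dtype=np.float32),)]
ds = GridPatchDataset(dataset=images, patch_size=(10, 10, 10))

# Each yielded item is a tuple with one patch per input array.
patches = [p for (p,) in ds]
print(len(patches), patches[0].shape)  # expected: 8 (1, 10, 10, 10)
```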
kylemath/mne-python | [
"586c5d918a673ab5d5c92ffb4479fe57fee5559d"
] | [
"mne/channels/tests/test_montage.py"
] | [
"# Author: Teon Brooks <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom itertools import chain\nimport os\nimport os.path as op\n\nimport pytest\n\nimport numpy as np\nfrom functools import partial\nfrom string import ascii_lowercase\n\nfrom numpy.testing import (assert_array_equal,\n assert_allclose, assert_equal)\nimport matplotlib.pyplot as plt\n\nfrom mne import __file__ as _mne_file, create_info, read_evokeds, pick_types\nfrom mne.fixes import nullcontext\nfrom mne.utils._testing import assert_object_equal\nfrom mne.channels import (get_builtin_montages, DigMontage, read_dig_dat,\n read_dig_egi, read_dig_captrak, read_dig_fif,\n make_standard_montage, read_custom_montage,\n compute_dev_head_t, make_dig_montage,\n read_dig_polhemus_isotrak, compute_native_head_t,\n read_polhemus_fastscan,\n read_dig_hpts)\nfrom mne.channels.montage import transform_to_head, _check_get_coord_frame\nfrom mne.utils import run_tests_if_main, assert_dig_allclose\nfrom mne.bem import _fit_sphere\nfrom mne.io.constants import FIFF\nfrom mne.io._digitization import (_format_dig_points,\n _get_fid_coords, _get_dig_eeg,\n _count_points_by_type)\nfrom mne.transforms import _ensure_trans\nfrom mne.viz._3d import _fiducial_coords\n\nfrom mne.io.kit import read_mrk\nfrom mne.io import (read_raw_brainvision, read_raw_egi, read_raw_fif,\n read_fiducials, __file__ as _MNE_IO_FILE)\n\nfrom mne.io import RawArray\nfrom mne.datasets import testing\nfrom mne.io.brainvision import __file__ as _BRAINVISON_FILE\n\n\ndata_path = testing.data_path(download=False)\nfif_dig_montage_fname = op.join(data_path, 'montage', 'eeganes07.fif')\negi_dig_montage_fname = op.join(data_path, 'montage', 'coordinates.xml')\negi_raw_fname = op.join(data_path, 'montage', 'egi_dig_test.raw')\negi_fif_fname = op.join(data_path, 'montage', 'egi_dig_raw.fif')\nbvct_dig_montage_fname = op.join(data_path, 'montage', 'captrak_coords.bvct')\nbv_raw_fname = op.join(data_path, 'montage', 'bv_dig_test.vhdr')\nbv_fif_fname = op.join(data_path, 'montage', 'bv_dig_raw.fif')\nlocs_montage_fname = op.join(data_path, 'EEGLAB', 'test_chans.locs')\nevoked_fname = op.join(data_path, 'montage', 'level2_raw-ave.fif')\neeglab_fname = op.join(data_path, 'EEGLAB', 'test_raw.set')\nbdf_fname1 = op.join(data_path, 'BDF', 'test_generator_2.bdf')\nbdf_fname2 = op.join(data_path, 'BDF', 'test_bdf_stim_channel.bdf')\negi_fname1 = op.join(data_path, 'EGI', 'test_egi.mff')\ncnt_fname = op.join(data_path, 'CNT', 'scan41_short.cnt')\n\nio_dir = op.dirname(_MNE_IO_FILE)\nkit_dir = op.join(io_dir, 'kit', 'tests', 'data')\nelp = op.join(kit_dir, 'test_elp.txt')\nhsp = op.join(kit_dir, 'test_hsp.txt')\nhpi = op.join(kit_dir, 'test_mrk.sqd')\nbv_fname = op.join(io_dir, 'brainvision', 'tests', 'data', 'test.vhdr')\nfif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif')\nedf_path = op.join(io_dir, 'edf', 'tests', 'data', 'test.edf')\nbdf_path = op.join(io_dir, 'edf', 'tests', 'data', 'test_bdf_eeglab.mat')\negi_fname2 = op.join(io_dir, 'egi', 'tests', 'data', 'test_egi.raw')\nvhdr_path = op.join(io_dir, 'brainvision', 'tests', 'data', 'test.vhdr')\nctf_fif_fname = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')\nnicolet_fname = op.join(io_dir, 'nicolet', 'tests', 'data',\n 'test_nicolet_raw.data')\n\n\ndef _make_toy_raw(n_channels):\n return RawArray(\n data=np.empty([n_channels, 1]),\n info=create_info(\n ch_names=list(ascii_lowercase[:n_channels]),\n sfreq=1, ch_types='eeg'\n )\n )\n\n\ndef 
_make_toy_dig_montage(n_channels, **kwargs):\n return make_dig_montage(\n ch_pos=dict(zip(\n list(ascii_lowercase[:n_channels]),\n np.arange(n_channels * 3).reshape(n_channels, 3),\n )),\n **kwargs\n )\n\n\ndef _get_dig_montage_pos(montage):\n return np.array([d['r'] for d in _get_dig_eeg(montage.dig)])\n\n\ndef test_dig_montage_trans(tmpdir):\n \"\"\"Test getting a trans from montage.\"\"\"\n nasion, lpa, rpa, *ch_pos = np.random.RandomState(0).randn(10, 3)\n ch_pos = {f'EEG{ii:3d}': pos for ii, pos in enumerate(ch_pos, 1)}\n montage = make_dig_montage(ch_pos, nasion=nasion, lpa=lpa, rpa=rpa,\n coord_frame='mri')\n trans = compute_native_head_t(montage)\n _ensure_trans(trans)\n # ensure that we can save and load it, too\n fname = tmpdir.join('temp-mon.fif')\n _check_roundtrip(montage, fname, 'mri')\n\n\ndef test_fiducials():\n \"\"\"Test handling of fiducials.\"\"\"\n # Eventually the code used here should be unified with montage.py, but for\n # now it uses code in odd places\n for fname in (fif_fname, ctf_fif_fname):\n fids, coord_frame = read_fiducials(fname)\n points = _fiducial_coords(fids, coord_frame)\n assert points.shape == (3, 3)\n # Fids\n assert_allclose(points[:, 2], 0., atol=1e-6)\n assert_allclose(points[::2, 1], 0., atol=1e-6)\n assert points[2, 0] > 0 # RPA\n assert points[0, 0] < 0 # LPA\n # Nasion\n assert_allclose(points[1, 0], 0., atol=1e-6)\n assert points[1, 1] > 0\n\n\ndef test_documented():\n \"\"\"Test that standard montages are documented.\"\"\"\n docs = make_standard_montage.__doc__\n lines = [line[4:] for line in docs.splitlines()]\n start = stop = None\n for li, line in enumerate(lines):\n if line.startswith('====') and li < len(lines) - 2 and \\\n lines[li + 1].startswith('Kind') and\\\n lines[li + 2].startswith('===='):\n start = li + 3\n elif start is not None and li > start and line.startswith('===='):\n stop = li\n break\n assert (start is not None)\n assert (stop is not None)\n kinds = [line.split(' ')[0] for line in lines[start:stop]]\n kinds = [kind for kind in kinds if kind != '']\n montages = os.listdir(op.join(op.dirname(_mne_file), 'channels', 'data',\n 'montages'))\n montages = sorted(op.splitext(m)[0] for m in montages)\n assert_equal(len(set(montages)), len(montages))\n assert_equal(len(set(kinds)), len(kinds), err_msg=str(sorted(kinds)))\n assert_equal(set(montages), set(kinds))\n\n\[email protected]('reader, file_content, expected_dig, ext, warning', [\n pytest.param(\n partial(read_custom_montage, head_size=None),\n ('FidNz 0 9.071585155 -2.359754454\\n'\n 'FidT9 -6.711765 0.040402876 -3.251600355\\n'\n 'very_very_very_long_name -5.831241498 -4.494821698 4.955347697\\n'\n 'Cz 0 0 1\\n'\n 'Cz 0 0 8.899186843'),\n make_dig_montage(\n ch_pos={\n 'very_very_very_long_name': [-5.8312416, -4.4948215, 4.9553475], # noqa\n 'Cz': [0., 0., 8.899187],\n },\n nasion=[0., 9.071585, -2.3597546],\n lpa=[-6.711765, 0.04040287, -3.2516003],\n rpa=None,\n ),\n 'sfp',\n (RuntimeWarning, r'Duplicate.*last will be used for Cz \\(2\\)'),\n id='sfp_duplicate'),\n\n pytest.param(\n partial(read_custom_montage, head_size=None),\n ('FidNz 0 9.071585155 -2.359754454\\n'\n 'FidT9 -6.711765 0.040402876 -3.251600355\\n'\n 'headshape 1 2 3\\n'\n 'headshape 4 5 6\\n'\n 'Cz 0 0 8.899186843'),\n make_dig_montage(\n hsp=[\n [1, 2, 3],\n [4, 5, 6],\n ],\n ch_pos={\n 'Cz': [0., 0., 8.899187],\n },\n nasion=[0., 9.071585, -2.3597546],\n lpa=[-6.711765, 0.04040287, -3.2516003],\n rpa=None,\n ),\n 'sfp',\n None,\n id='sfp_headshape'),\n\n pytest.param(\n 
partial(read_custom_montage, head_size=1),\n ('1\t 0\t 0.50669\t FPz\\n'\n '2\t 23\t 0.71\t \tEOG1\\n'\n '3\t -39.947\t 0.34459\t F3\\n'\n '4\t 0\t 0.25338\t Fz\\n'),\n make_dig_montage(\n ch_pos={\n 'EOG1': [0.30873816, 0.72734152, -0.61290705],\n 'F3': [-0.56705965, 0.67706631, 0.46906776],\n 'FPz': [0., 0.99977915, -0.02101571],\n 'Fz': [0., 0.71457525, 0.69955859],\n },\n nasion=None, lpa=None, rpa=None, coord_frame='head',\n ),\n 'loc',\n None,\n id='EEGLAB'),\n\n pytest.param(\n partial(read_custom_montage, head_size=None, coord_frame='mri'),\n ('// MatLab Sphere coordinates [degrees] Cartesian coordinates\\n' # noqa: E501\n '// Label Theta Phi Radius X Y Z off sphere surface\\n' # noqa: E501\n 'E1 37.700 -14.000 1.000 0.7677 0.5934 -0.2419 -0.00000000000000011\\n' # noqa: E501\n 'E3 51.700 11.000 1.000 0.6084 0.7704 0.1908 0.00000000000000000\\n' # noqa: E501\n 'E31 90.000 -11.000 1.000 0.0000 0.9816 -0.1908 0.00000000000000000\\n' # noqa: E501\n 'E61 158.000 -17.200 1.000 -0.8857 0.3579 -0.2957 -0.00000000000000022'), # noqa: E501\n make_dig_montage(\n ch_pos={\n 'E1': [0.7677, 0.5934, -0.2419],\n 'E3': [0.6084, 0.7704, 0.1908],\n 'E31': [0., 0.9816, -0.1908],\n 'E61': [-0.8857, 0.3579, -0.2957],\n },\n nasion=None, lpa=None, rpa=None, coord_frame='mri',\n ),\n 'csd',\n None,\n id='matlab'),\n\n pytest.param(\n partial(read_custom_montage, head_size=None),\n ('# ASA electrode file\\nReferenceLabel avg\\nUnitPosition mm\\n'\n 'NumberPositions= 68\\n'\n 'Positions\\n'\n '-86.0761 -19.9897 -47.9860\\n'\n '85.7939 -20.0093 -48.0310\\n'\n '0.0083 86.8110 -39.9830\\n'\n '-86.0761 -24.9897 -67.9860\\n'\n 'Labels\\nLPA\\nRPA\\nNz\\nDummy\\n'),\n make_dig_montage(\n ch_pos={\n 'Dummy': [-0.0860761, -0.0249897, -0.067986],\n },\n nasion=[8.3000e-06, 8.6811e-02, -3.9983e-02],\n lpa=[-0.0860761, -0.0199897, -0.047986],\n rpa=[0.0857939, -0.0200093, -0.048031],\n ),\n 'elc',\n None,\n id='ASA electrode'),\n\n pytest.param(\n partial(read_custom_montage, head_size=1),\n ('Site Theta Phi\\n'\n 'Fp1 -92 -72\\n'\n 'Fp2 92 72\\n'\n 'very_very_very_long_name -92 72\\n'\n 'O2 92 -90\\n'),\n make_dig_montage(\n ch_pos={\n 'Fp1': [-0.30882875, 0.95047716, -0.0348995],\n 'Fp2': [0.30882875, 0.95047716, -0.0348995],\n 'very_very_very_long_name': [-0.30882875, -0.95047716, -0.0348995], # noqa\n 'O2': [6.11950389e-17, -9.99390827e-01, -3.48994967e-02]\n },\n nasion=None, lpa=None, rpa=None,\n ),\n 'txt',\n None,\n id='generic theta-phi (txt)'),\n\n pytest.param(\n partial(read_custom_montage, head_size=None),\n ('346\\n' # XXX: this should actually race an error 346 != 4\n 'FID\\t LPA\\t -120.03\\t 0\\t 85\\n'\n 'FID\\t RPA\\t 120.03\\t 0\\t 85\\n'\n 'FID\\t Nz\\t 114.03\\t 90\\t 85\\n'\n 'EEG\\t F3\\t -62.027\\t -50.053\\t 85\\n'\n 'EEG\\t Fz\\t 45.608\\t 90\\t 85\\n'\n 'EEG\\t F4\\t 62.01\\t 50.103\\t 85\\n'\n 'EEG\\t FCz\\t 68.01\\t 58.103\\t 85\\n'),\n make_dig_montage(\n ch_pos={\n 'F3': [-0.48200427, 0.57551063, 0.39869712],\n 'Fz': [3.71915931e-17, 6.07384809e-01, 5.94629038e-01],\n 'F4': [0.48142596, 0.57584026, 0.39891983],\n 'FCz': [0.41645989, 0.66914889, 0.31827805],\n },\n nasion=[4.75366562e-17, 7.76332511e-01, -3.46132681e-01],\n lpa=[-7.35898963e-01, 9.01216309e-17, -4.25385374e-01],\n rpa=[0.73589896, 0., -0.42538537],\n ),\n 'elp',\n None,\n id='BESA spherical model'),\n\n pytest.param(\n partial(read_dig_hpts, unit='m'),\n ('eeg Fp1 -95.0 -3. 
-3.\\n'\n 'eeg AF7 -1 -1 -3\\n'\n 'eeg A3 -2 -2 2\\n'\n 'eeg A 0 0 0'),\n make_dig_montage(\n ch_pos={\n 'A': [0., 0., 0.], 'A3': [-2., -2., 2.],\n 'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.],\n },\n nasion=None, lpa=None, rpa=None,\n ),\n 'hpts',\n None,\n id='legacy mne-c'),\n\n pytest.param(\n partial(read_custom_montage, head_size=None),\n ('<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\\n'\n '<!-- Generated by EasyCap Configurator 19.05.2014 -->\\n'\n '<Electrodes defaults=\"false\">\\n'\n ' <Electrode>\\n'\n ' <Name>Fp1</Name>\\n'\n ' <Theta>-90</Theta>\\n'\n ' <Phi>-72</Phi>\\n'\n ' <Radius>1</Radius>\\n'\n ' <Number>1</Number>\\n'\n ' </Electrode>\\n'\n ' <Electrode>\\n'\n ' <Name>Fz</Name>\\n'\n ' <Theta>45</Theta>\\n'\n ' <Phi>90</Phi>\\n'\n ' <Radius>1</Radius>\\n'\n ' <Number>2</Number>\\n'\n ' </Electrode>\\n'\n ' <Electrode>\\n'\n ' <Name>F3</Name>\\n'\n ' <Theta>-60</Theta>\\n'\n ' <Phi>-51</Phi>\\n'\n ' <Radius>1</Radius>\\n'\n ' <Number>3</Number>\\n'\n ' </Electrode>\\n'\n ' <Electrode>\\n'\n ' <Name>F7</Name>\\n'\n ' <Theta>-90</Theta>\\n'\n ' <Phi>-36</Phi>\\n'\n ' <Radius>1</Radius>\\n'\n ' <Number>4</Number>\\n'\n ' </Electrode>\\n'\n '</Electrodes>'),\n make_dig_montage(\n ch_pos={\n 'Fp1': [-3.09016994e-01, 9.51056516e-01, 6.12323400e-17],\n 'Fz': [4.32978028e-17, 7.07106781e-01, 7.07106781e-01],\n 'F3': [-0.54500745, 0.67302815, 0.5],\n 'F7': [-8.09016994e-01, 5.87785252e-01, 6.12323400e-17],\n },\n nasion=None, lpa=None, rpa=None,\n ),\n 'bvef',\n None,\n id='brainvision'),\n])\ndef test_montage_readers(\n reader, file_content, expected_dig, ext, warning, tmpdir\n):\n \"\"\"Test that we have an equivalent of read_montage for all file formats.\"\"\"\n fname = op.join(str(tmpdir), 'test.{ext}'.format(ext=ext))\n with open(fname, 'w') as fid:\n fid.write(file_content)\n\n if warning is None:\n ctx = nullcontext()\n else:\n ctx = pytest.warns(warning[0], match=warning[1])\n with ctx:\n dig_montage = reader(fname)\n assert isinstance(dig_montage, DigMontage)\n\n actual_ch_pos = dig_montage._get_ch_pos()\n expected_ch_pos = expected_dig._get_ch_pos()\n for kk in actual_ch_pos:\n assert_allclose(actual_ch_pos[kk], expected_ch_pos[kk], atol=1e-5)\n assert len(dig_montage.dig) == len(expected_dig.dig)\n for d1, d2 in zip(dig_montage.dig, expected_dig.dig):\n assert d1['coord_frame'] == d2['coord_frame']\n for key in ('coord_frame', 'ident', 'kind'):\n assert isinstance(d1[key], int)\n assert isinstance(d2[key], int)\n with pytest.warns(None) as w:\n xform = compute_native_head_t(dig_montage)\n assert xform['to'] == FIFF.FIFFV_COORD_HEAD\n assert xform['from'] == FIFF.FIFFV_COORD_UNKNOWN\n n = int(np.allclose(xform['trans'], np.eye(4)))\n assert len(w) == n\n\n\[email protected]_testing_data\ndef test_read_locs():\n \"\"\"Test reading EEGLAB locs.\"\"\"\n data = read_custom_montage(locs_montage_fname)._get_ch_pos()\n assert_allclose(\n actual=np.stack(\n [data[kk] for kk in ('FPz', 'EOG1', 'F3', 'Fz')] # 4 random chs\n ),\n desired=[[0., 0.094979, -0.001996],\n [0.02933, 0.069097, -0.058226],\n [-0.053871, 0.064321, 0.044561],\n [0., 0.067885, 0.066458]],\n atol=1e-6\n )\n\n\ndef test_read_dig_dat(tmpdir):\n \"\"\"Test reading *.dat electrode locations.\"\"\"\n rows = [\n ['Nasion', 78, 0.00, 1.00, 0.00],\n ['Left', 76, -1.00, 0.00, 0.00],\n ['Right', 82, 1.00, -0.00, 0.00],\n ['O2', 69, -0.50, -0.90, 0.05],\n ['O2', 68, 0.00, 0.01, 0.02],\n ['Centroid', 67, 0.00, 0.00, 0.00],\n ]\n # write mock test.dat file\n temp_dir = str(tmpdir)\n fname_temp = 
op.join(temp_dir, 'test.dat')\n with open(fname_temp, 'w') as fid:\n for row in rows:\n name = row[0].rjust(10)\n data = '\\t'.join(map(str, row[1:]))\n fid.write(\"%s\\t%s\\n\" % (name, data))\n # construct expected value\n idents = {\n 78: FIFF.FIFFV_POINT_NASION,\n 76: FIFF.FIFFV_POINT_LPA,\n 82: FIFF.FIFFV_POINT_RPA,\n 68: 1,\n 69: 1,\n }\n kinds = {\n 78: FIFF.FIFFV_POINT_CARDINAL,\n 76: FIFF.FIFFV_POINT_CARDINAL,\n 82: FIFF.FIFFV_POINT_CARDINAL,\n 69: FIFF.FIFFV_POINT_EEG,\n 68: FIFF.FIFFV_POINT_EEG,\n }\n target = {row[0]: {'r': row[2:], 'ident': idents[row[1]],\n 'kind': kinds[row[1]], 'coord_frame': 0}\n for row in rows[:-1]}\n assert_allclose(target['O2']['r'], [0, 0.01, 0.02])\n # read it\n with pytest.warns(RuntimeWarning, match=r'Duplic.*for O2 \\(2\\)'):\n dig = read_dig_dat(fname_temp)\n assert set(dig.ch_names) == {'O2'}\n keys = chain(['Left', 'Nasion', 'Right'], dig.ch_names)\n target = [target[k] for k in keys]\n assert dig.dig == target\n\n\ndef test_read_dig_montage_using_polhemus_fastscan():\n \"\"\"Test FastScan.\"\"\"\n N_EEG_CH = 10\n\n my_electrode_positions = read_polhemus_fastscan(\n op.join(kit_dir, 'test_elp.txt')\n )\n\n montage = make_dig_montage(\n # EEG_CH\n ch_pos=dict(zip(ascii_lowercase[:N_EEG_CH],\n np.random.RandomState(0).rand(N_EEG_CH, 3))),\n # NO NAMED points\n nasion=my_electrode_positions[0],\n lpa=my_electrode_positions[1],\n rpa=my_electrode_positions[2],\n hpi=my_electrode_positions[3:],\n hsp=read_polhemus_fastscan(op.join(kit_dir, 'test_hsp.txt')),\n\n # Other defaults\n coord_frame='unknown'\n )\n\n assert repr(montage) == (\n '<DigMontage | '\n '500 extras (headshape), 5 HPIs, 3 fiducials, 10 channels>'\n ) # XXX: is this wrong? extra is not in headspace, is it?\n\n assert set([d['coord_frame'] for d in montage.dig]) == {\n FIFF.FIFFV_COORD_UNKNOWN\n } # XXX: so far we build everything in 'unknown'\n\n EXPECTED_FID_IN_POLHEMUS = {\n 'nasion': [0.001393, 0.0131613, -0.0046967],\n 'lpa': [-0.0624997, -0.0737271, 0.07996],\n 'rpa': [-0.0748957, 0.0873785, 0.0811943],\n }\n fiducials, fid_coordframe = _get_fid_coords(montage.dig)\n assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN\n for kk, val in fiducials.items():\n assert_allclose(val, EXPECTED_FID_IN_POLHEMUS[kk])\n\n\ndef test_read_dig_montage_using_polhemus_fastscan_error_handling(tmpdir):\n \"\"\"Test reading Polhemus FastSCAN errors.\"\"\"\n with open(op.join(kit_dir, 'test_elp.txt')) as fid:\n content = fid.read().replace('FastSCAN', 'XxxxXXXX')\n\n fname = str(tmpdir.join('faulty_FastSCAN.txt'))\n with open(fname, 'w') as fid:\n fid.write(content)\n\n with pytest.raises(ValueError, match='not contain Polhemus FastSCAN'):\n _ = read_polhemus_fastscan(fname)\n\n EXPECTED_ERR_MSG = \"allowed value is '.txt', but got '.bar' instead\"\n with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):\n _ = read_polhemus_fastscan(fname=tmpdir.join('foo.bar'))\n\n\ndef test_read_dig_polhemus_isotrak_hsp():\n \"\"\"Test reading Polhemus IsoTrak HSP file.\"\"\"\n EXPECTED_FID_IN_POLHEMUS = {\n 'nasion': np.array([1.1056e-01, -5.4210e-19, 0]),\n 'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]),\n 'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]),\n }\n montage = read_dig_polhemus_isotrak(fname=op.join(kit_dir, 'test.hsp'),\n ch_names=None)\n assert repr(montage) == (\n '<DigMontage | '\n '500 extras (headshape), 0 HPIs, 3 fiducials, 0 channels>'\n )\n\n fiducials, fid_coordframe = _get_fid_coords(montage.dig)\n\n assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN\n for kk, val in 
fiducials.items():\n assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk])\n\n\ndef test_read_dig_polhemus_isotrak_elp():\n \"\"\"Test reading Polhemus IsoTrak ELP file.\"\"\"\n EXPECTED_FID_IN_POLHEMUS = {\n 'nasion': np.array([1.1056e-01, -5.4210e-19, 0]),\n 'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]),\n 'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]),\n }\n montage = read_dig_polhemus_isotrak(fname=op.join(kit_dir, 'test.elp'),\n ch_names=None)\n assert repr(montage) == (\n '<DigMontage | '\n '0 extras (headshape), 5 HPIs, 3 fiducials, 0 channels>'\n )\n fiducials, fid_coordframe = _get_fid_coords(montage.dig)\n\n assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN\n for kk, val in fiducials.items():\n assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk])\n\n\[email protected](scope='module')\ndef isotrak_eeg(tmpdir_factory):\n \"\"\"Mock isotrak file with EEG positions.\"\"\"\n _SEED = 42\n N_ROWS, N_COLS = 5, 3\n content = np.random.RandomState(_SEED).randn(N_ROWS, N_COLS)\n\n fname = tmpdir_factory.mktemp('data').join('test.eeg')\n with open(str(fname), 'w') as fid:\n fid.write((\n '3\t200\\n'\n '//Shape file\\n'\n '//Minor revision number\\n'\n '2\\n'\n '//Subject Name\\n'\n '%N\tName \\n'\n '////Shape code, number of digitized points\\n'\n ))\n fid.write('0 {rows:d}\\n'.format(rows=N_ROWS))\n fid.write((\n '//Position of fiducials X+, Y+, Y- on the subject\\n'\n '%F\t0.11056\t-5.421e-19\t0\t\\n'\n '%F\t-0.00021075\t0.080793\t-7.5894e-19\t\\n'\n '%F\t0.00021075\t-0.080793\t-2.8731e-18\t\\n'\n '//No of rows, no of columns; position of digitized points\\n'\n ))\n fid.write('{rows:d} {cols:d}\\n'.format(rows=N_ROWS, cols=N_COLS))\n for row in content:\n fid.write('\\t'.join('%0.18e' % cell for cell in row) + '\\n')\n\n return str(fname)\n\n\ndef test_read_dig_polhemus_isotrak_eeg(isotrak_eeg):\n \"\"\"Test reading Polhemus IsoTrak EEG positions.\"\"\"\n N_CHANNELS = 5\n _SEED = 42\n EXPECTED_FID_IN_POLHEMUS = {\n 'nasion': np.array([1.1056e-01, -5.4210e-19, 0]),\n 'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]),\n 'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]),\n }\n ch_names = ['eeg {:01d}'.format(ii) for ii in range(N_CHANNELS)]\n EXPECTED_CH_POS = dict(zip(\n ch_names, np.random.RandomState(_SEED).randn(N_CHANNELS, 3)))\n\n montage = read_dig_polhemus_isotrak(fname=isotrak_eeg, ch_names=ch_names)\n assert repr(montage) == (\n '<DigMontage | '\n '0 extras (headshape), 0 HPIs, 3 fiducials, 5 channels>'\n )\n\n fiducials, fid_coordframe = _get_fid_coords(montage.dig)\n\n assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN\n for kk, val in fiducials.items():\n assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk])\n\n for kk, dig_point in zip(montage.ch_names, _get_dig_eeg(montage.dig)):\n assert_array_equal(dig_point['r'], EXPECTED_CH_POS[kk])\n assert dig_point['coord_frame'] == FIFF.FIFFV_COORD_UNKNOWN\n\n\ndef test_read_dig_polhemus_isotrak_error_handling(isotrak_eeg, tmpdir):\n \"\"\"Test errors in reading Polhemus IsoTrak files.\n\n 1 - matching ch_names and number of points in isotrak file.\n 2 - error for unsupported file extensions.\n \"\"\"\n # Check ch_names\n N_CHANNELS = 5\n EXPECTED_ERR_MSG = \"not match the number of points.*Expected.*5, given 47\"\n with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):\n _ = read_dig_polhemus_isotrak(\n fname=isotrak_eeg,\n ch_names=['eeg {:01d}'.format(ii) for ii in range(N_CHANNELS + 42)]\n )\n\n # Check fname extensions\n fname = op.join(tmpdir, 'foo.bar')\n with pytest.raises(\n ValueError,\n 
match=\"Allowed val.*'.hsp', '.elp', and '.eeg', but got '.bar' instead\"\n ):\n _ = read_dig_polhemus_isotrak(fname=fname, ch_names=None)\n\n\ndef test_combining_digmontage_objects():\n \"\"\"Test combining different DigMontage objects.\"\"\"\n rng = np.random.RandomState(0)\n fiducials = dict(zip(('nasion', 'lpa', 'rpa'), rng.rand(3, 3)))\n\n # hsp positions are [1X, 1X, 1X]\n hsp1 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 11.))\n hsp2 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 12.))\n hsp3 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 13.))\n\n # hpi positions are [2X, 2X, 2X]\n hpi1 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 21.))\n hpi2 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 22.))\n hpi3 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 23.))\n\n # channels have positions at 40s, 50s, and 60s.\n ch_pos1 = make_dig_montage(\n **fiducials,\n ch_pos={'h': [41, 41, 41], 'b': [42, 42, 42], 'g': [43, 43, 43]}\n )\n ch_pos2 = make_dig_montage(\n **fiducials,\n ch_pos={'n': [51, 51, 51], 'y': [52, 52, 52], 'p': [53, 53, 53]}\n )\n ch_pos3 = make_dig_montage(\n **fiducials,\n ch_pos={'v': [61, 61, 61], 'a': [62, 62, 62], 'l': [63, 63, 63]}\n )\n\n montage = (\n DigMontage() + hsp1 + hsp2 + hsp3 + hpi1 + hpi2 + hpi3 + ch_pos1 +\n ch_pos2 + ch_pos3\n )\n assert repr(montage) == (\n '<DigMontage | '\n '6 extras (headshape), 6 HPIs, 3 fiducials, 9 channels>'\n )\n\n EXPECTED_MONTAGE = make_dig_montage(\n **fiducials,\n hsp=np.concatenate([np.full((2, 3), 11.), np.full((2, 3), 12.),\n np.full((2, 3), 13.)]),\n hpi=np.concatenate([np.full((2, 3), 21.), np.full((2, 3), 22.),\n np.full((2, 3), 23.)]),\n ch_pos={\n 'h': [41, 41, 41], 'b': [42, 42, 42], 'g': [43, 43, 43],\n 'n': [51, 51, 51], 'y': [52, 52, 52], 'p': [53, 53, 53],\n 'v': [61, 61, 61], 'a': [62, 62, 62], 'l': [63, 63, 63],\n }\n )\n\n # Do some checks to ensure they are the same DigMontage\n assert len(montage.ch_names) == len(EXPECTED_MONTAGE.ch_names)\n assert all([c in montage.ch_names for c in EXPECTED_MONTAGE.ch_names])\n actual_occurrences = _count_points_by_type(montage.dig)\n expected_occurrences = _count_points_by_type(EXPECTED_MONTAGE.dig)\n assert actual_occurrences == expected_occurrences\n\n\ndef test_combining_digmontage_forbiden_behaviors():\n \"\"\"Test combining different DigMontage objects with repeated names.\"\"\"\n rng = np.random.RandomState(0)\n fiducials = dict(zip(('nasion', 'lpa', 'rpa'), rng.rand(3, 3)))\n dig1 = make_dig_montage(\n **fiducials,\n ch_pos=dict(zip(list('abc'), rng.rand(3, 3))),\n )\n dig2 = make_dig_montage(\n **fiducials,\n ch_pos=dict(zip(list('bcd'), rng.rand(3, 3))),\n )\n dig2_wrong_fid = make_dig_montage(\n nasion=rng.rand(3), lpa=rng.rand(3), rpa=rng.rand(3),\n ch_pos=dict(zip(list('ghi'), rng.rand(3, 3))),\n )\n dig2_wrong_coordframe = make_dig_montage(\n **fiducials,\n ch_pos=dict(zip(list('ghi'), rng.rand(3, 3))),\n coord_frame='meg'\n )\n\n EXPECTED_ERR_MSG = \"Cannot.*duplicated channel.*found: \\'b\\', \\'c\\'.\"\n with pytest.raises(RuntimeError, match=EXPECTED_ERR_MSG):\n _ = dig1 + dig2\n\n with pytest.raises(RuntimeError, match='fiducial locations do not match'):\n _ = dig1 + dig2_wrong_fid\n\n with pytest.raises(RuntimeError, match='not in the same coordinate '):\n _ = dig1 + dig2_wrong_coordframe\n\n\ndef test_set_dig_montage():\n \"\"\"Test setting DigMontage with toy understandable points.\"\"\"\n N_CHANNELS, N_HSP, N_HPI = 3, 2, 1\n ch_names = list(ascii_lowercase[:N_CHANNELS])\n ch_pos = dict(zip(\n ch_names,\n 
np.arange(N_CHANNELS * 3).reshape(N_CHANNELS, 3),\n ))\n\n montage_ch_only = make_dig_montage(ch_pos=ch_pos, coord_frame='head')\n\n assert repr(montage_ch_only) == (\n '<DigMontage | 0 extras (headshape), 0 HPIs, 0 fiducials, 3 channels>'\n )\n info = create_info(ch_names, sfreq=1, ch_types='eeg')\n info.set_montage(montage_ch_only)\n assert len(info['dig']) == len(montage_ch_only.dig)\n\n assert_allclose(actual=np.array([ch['loc'][:6] for ch in info['chs']]),\n desired=[[0., 1., 2., 0., 0., 0.],\n [3., 4., 5., 0., 0., 0.],\n [6., 7., 8., 0., 0., 0.]])\n\n montage_full = make_dig_montage(\n ch_pos=dict(**ch_pos, EEG000=np.full(3, 42)), # 4 = 3 egg + 1 eeg_ref\n nasion=[1, 1, 1], lpa=[2, 2, 2], rpa=[3, 3, 3],\n hsp=np.full((N_HSP, 3), 4),\n hpi=np.full((N_HPI, 3), 4),\n coord_frame='head'\n )\n\n assert repr(montage_full) == (\n '<DigMontage | 2 extras (headshape), 1 HPIs, 3 fiducials, 4 channels>'\n )\n\n info = create_info(ch_names, sfreq=1, ch_types='eeg')\n info.set_montage(montage_full)\n EXPECTED_LEN = sum({'hsp': 2, 'hpi': 1, 'fid': 3, 'eeg': 4}.values())\n assert len(info['dig']) == EXPECTED_LEN\n assert_allclose(actual=np.array([ch['loc'][:6] for ch in info['chs']]),\n desired=[[0., 1., 2., 42., 42., 42.],\n [3., 4., 5., 42., 42., 42.],\n [6., 7., 8., 42., 42., 42.]])\n\n\[email protected]_testing_data\ndef test_fif_dig_montage(tmpdir):\n \"\"\"Test FIF dig montage support.\"\"\"\n dig_montage = read_dig_fif(fif_dig_montage_fname)\n\n # test round-trip IO\n temp_dir = str(tmpdir)\n fname_temp = op.join(temp_dir, 'test.fif')\n _check_roundtrip(dig_montage, fname_temp)\n\n # Make a BrainVision file like the one the user would have had\n raw_bv = read_raw_brainvision(bv_fname, preload=True)\n raw_bv_2 = raw_bv.copy()\n mapping = dict()\n for ii, ch_name in enumerate(raw_bv.ch_names):\n mapping[ch_name] = 'EEG%03d' % (ii + 1,)\n raw_bv.rename_channels(mapping)\n for ii, ch_name in enumerate(raw_bv_2.ch_names):\n mapping[ch_name] = 'EEG%03d' % (ii + 33,)\n raw_bv_2.rename_channels(mapping)\n raw_bv.add_channels([raw_bv_2])\n for ch in raw_bv.info['chs']:\n ch['kind'] = FIFF.FIFFV_EEG_CH\n\n # Set the montage\n raw_bv.set_montage(dig_montage)\n\n # Check the result\n evoked = read_evokeds(evoked_fname)[0]\n\n # check info[chs] matches\n assert_equal(len(raw_bv.ch_names), len(evoked.ch_names) - 1)\n for ch_py, ch_c in zip(raw_bv.info['chs'], evoked.info['chs'][:-1]):\n assert_equal(ch_py['ch_name'],\n ch_c['ch_name'].replace('EEG ', 'EEG'))\n # C actually says it's unknown, but it's not (?):\n # assert_equal(ch_py['coord_frame'], ch_c['coord_frame'])\n assert_equal(ch_py['coord_frame'], FIFF.FIFFV_COORD_HEAD)\n c_loc = ch_c['loc'].copy()\n c_loc[c_loc == 0] = np.nan\n assert_allclose(ch_py['loc'], c_loc, atol=1e-7)\n\n # check info[dig]\n assert_dig_allclose(raw_bv.info, evoked.info)\n\n # Roundtrip of non-FIF start\n montage = make_dig_montage(hsp=read_polhemus_fastscan(hsp),\n hpi=read_mrk(hpi))\n elp_points = read_polhemus_fastscan(elp)\n ch_pos = {\"EEG%03d\" % (k + 1): pos for k, pos in enumerate(elp_points[8:])}\n montage += make_dig_montage(nasion=elp_points[0],\n lpa=elp_points[1],\n rpa=elp_points[2],\n ch_pos=ch_pos)\n _check_roundtrip(montage, fname_temp, 'unknown')\n montage = transform_to_head(montage)\n _check_roundtrip(montage, fname_temp)\n montage.dig[0]['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN\n with pytest.raises(RuntimeError, match='Only a single coordinate'):\n montage.save(fname_temp)\n\n\[email protected]_testing_data\ndef test_egi_dig_montage(tmpdir):\n 
\"\"\"Test EGI MFF XML dig montage support.\"\"\"\n dig_montage = read_dig_egi(egi_dig_montage_fname)\n fid, coord = _get_fid_coords(dig_montage.dig)\n\n assert coord == FIFF.FIFFV_COORD_UNKNOWN\n assert_allclose(\n actual=np.array([fid[key] for key in ['nasion', 'lpa', 'rpa']]),\n desired=[[ 0. , 10.564, -2.051], # noqa\n [-8.592, 0.498, -4.128], # noqa\n [ 8.592, 0.498, -4.128]], # noqa\n )\n\n # Test accuracy and embedding within raw object\n raw_egi = read_raw_egi(egi_raw_fname, channel_naming='EEG %03d')\n\n raw_egi.set_montage(dig_montage)\n test_raw_egi = read_raw_fif(egi_fif_fname)\n\n assert_equal(len(raw_egi.ch_names), len(test_raw_egi.ch_names))\n for ch_raw, ch_test_raw in zip(raw_egi.info['chs'],\n test_raw_egi.info['chs']):\n assert_equal(ch_raw['ch_name'], ch_test_raw['ch_name'])\n assert_equal(ch_raw['coord_frame'], FIFF.FIFFV_COORD_HEAD)\n assert_allclose(ch_raw['loc'], ch_test_raw['loc'], atol=1e-7)\n\n assert_dig_allclose(raw_egi.info, test_raw_egi.info)\n\n dig_montage_in_head = transform_to_head(dig_montage.copy())\n fid, coord = _get_fid_coords(dig_montage_in_head.dig)\n assert coord == FIFF.FIFFV_COORD_HEAD\n assert_allclose(\n actual=np.array([fid[key] for key in ['nasion', 'lpa', 'rpa']]),\n desired=[[0., 10.278, 0.], [-8.592, 0., 0.], [8.592, 0., 0.]],\n atol=1e-4,\n )\n\n # test round-trip IO\n fname_temp = tmpdir.join('egi_test.fif')\n _check_roundtrip(dig_montage, fname_temp, 'unknown')\n _check_roundtrip(dig_montage_in_head, fname_temp)\n\n\ndef _pop_montage(dig_montage, ch_name):\n # remove reference that was not used in old API\n name_idx = dig_montage.ch_names.index(ch_name)\n dig_idx = dig_montage._get_dig_names().index(ch_name)\n\n del dig_montage.dig[dig_idx]\n del dig_montage.ch_names[name_idx]\n for k in range(dig_idx, len(dig_montage.dig)):\n dig_montage.dig[k]['ident'] -= 1\n\n\[email protected]_testing_data\ndef test_read_dig_captrak(tmpdir):\n \"\"\"Test reading a captrak montage file.\"\"\"\n EXPECTED_CH_NAMES_OLD = [\n 'AF3', 'AF4', 'AF7', 'AF8', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'CP1',\n 'CP2', 'CP3', 'CP4', 'CP5', 'CP6', 'CPz', 'Cz', 'F1', 'F2', 'F3', 'F4',\n 'F5', 'F6', 'F7', 'F8', 'FC1', 'FC2', 'FC3', 'FC4', 'FC5', 'FC6',\n 'FT10', 'FT7', 'FT8', 'FT9', 'Fp1', 'Fp2', 'Fz', 'GND', 'O1', 'O2',\n 'Oz', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'PO10', 'PO3',\n 'PO4', 'PO7', 'PO8', 'PO9', 'POz', 'Pz', 'REF', 'T7', 'T8', 'TP10',\n 'TP7', 'TP8', 'TP9'\n ]\n EXPECTED_CH_NAMES = [\n 'T7', 'FC5', 'F7', 'C5', 'FT7', 'FT9', 'TP7', 'TP9', 'P7', 'CP5',\n 'PO7', 'C3', 'CP3', 'P5', 'P3', 'PO3', 'PO9', 'O1', 'Oz', 'POz', 'O2',\n 'PO4', 'P1', 'Pz', 'P2', 'CP2', 'CP1', 'CPz', 'Cz', 'C1', 'FC1', 'FC3',\n 'REF', 'F3', 'F1', 'Fz', 'F5', 'AF7', 'AF3', 'Fp1', 'GND', 'F2', 'AF4',\n 'Fp2', 'F4', 'F8', 'F6', 'AF8', 'FC2', 'FC6', 'FC4', 'C2', 'C4', 'P4',\n 'CP4', 'PO8', 'P8', 'P6', 'CP6', 'PO10', 'TP10', 'TP8', 'FT10', 'T8',\n 'C6', 'FT8'\n ]\n assert set(EXPECTED_CH_NAMES) == set(EXPECTED_CH_NAMES_OLD)\n montage = read_dig_captrak(\n fname=op.join(data_path, 'montage', 'captrak_coords.bvct')\n )\n\n assert montage.ch_names == EXPECTED_CH_NAMES\n assert repr(montage) == (\n '<DigMontage | '\n '0 extras (headshape), 0 HPIs, 3 fiducials, 66 channels>'\n )\n\n montage = transform_to_head(montage) # transform_to_head has to be tested\n _check_roundtrip(montage=montage, fname=str(tmpdir.join('bvct_test.fif')))\n\n fid, _ = _get_fid_coords(montage.dig)\n assert_allclose(\n actual=np.array([fid.nasion, fid.lpa, fid.rpa]),\n desired=[[0, 0.11309, 0], [-0.09189, 0, 0], 
[0.09240, 0, 0]],\n atol=1e-5,\n )\n\n raw_bv = read_raw_brainvision(bv_raw_fname)\n raw_bv.set_channel_types({\"HEOG\": 'eog', \"VEOG\": 'eog', \"ECG\": 'ecg'})\n\n raw_bv.set_montage(montage)\n\n test_raw_bv = read_raw_fif(bv_fif_fname)\n\n # compare after set_montage using chs loc.\n for actual, expected in zip(raw_bv.info['chs'], test_raw_bv.info['chs']):\n assert_allclose(actual['loc'][:3], expected['loc'][:3])\n if actual['kind'] == FIFF.FIFFV_EEG_CH:\n assert_allclose(actual['loc'][3:6],\n [-0.005103, 0.05395, 0.144622], rtol=1e-04)\n\n\n# https://gist.github.com/larsoner/2264fb5895070d29a8c9aa7c0dc0e8a6\n_MGH60 = [\n 'Fz', 'F2', 'AF4', 'Fpz', 'Fp1', 'AF8', 'FT9', 'F7', 'FC5', 'FC6', 'FT7',\n 'F1', 'AF7', 'FT8', 'F6', 'F5', 'FC1', 'FC2', 'FT10', 'T9', 'Cz', 'F4',\n 'T7', 'C2', 'C4', 'C1', 'C3', 'F8', 'F3', 'C5', 'Fp2', 'AF3',\n 'CP2', 'P2', 'O2', 'Iz', 'Oz', 'PO4', 'O1', 'P8', 'PO8', 'P6', 'PO7', 'PO3', 'C6', 'TP9', 'TP8', 'CP4', 'P4', # noqa\n 'CP3', 'CP1', 'TP7', 'P3', 'Pz', 'P1', 'P7', 'P5', 'TP10', 'T8', 'T10',\n]\n\n\[email protected]('rename', ('raw', 'montage', 'custom'))\ndef test_set_montage_mgh(rename):\n \"\"\"Test setting 'mgh60' montage to old fif.\"\"\"\n raw = read_raw_fif(fif_fname)\n eeg_picks = pick_types(raw.info, meg=False, eeg=True, exclude=())\n assert list(eeg_picks) == [ii for ii, name in enumerate(raw.ch_names)\n if name.startswith('EEG')]\n orig_pos = np.array([raw.info['chs'][pick]['loc'][:3]\n for pick in eeg_picks])\n atol = 1e-6\n if rename == 'raw':\n raw.rename_channels(lambda x: x.replace('EEG ', 'EEG'))\n raw.set_montage('mgh60') # test loading with string argument\n elif rename == 'montage':\n mon = make_standard_montage('mgh60')\n mon.rename_channels(lambda x: x.replace('EEG', 'EEG '))\n assert [raw.ch_names[pick] for pick in eeg_picks] == mon.ch_names\n raw.set_montage(mon)\n else:\n atol = 3e-3 # XXX old defs here apparently (maybe not realistic)?\n assert rename == 'custom'\n assert len(_MGH60) == 60\n mon = make_standard_montage('standard_1020')\n\n def renamer(x):\n try:\n return 'EEG %03d' % (_MGH60.index(x) + 1,)\n except ValueError:\n return x\n\n mon.rename_channels(renamer)\n raw.set_montage(mon)\n\n new_pos = np.array([ch['loc'][:3] for ch in raw.info['chs']\n if ch['ch_name'].startswith('EEG')])\n assert ((orig_pos != new_pos).all())\n\n r0 = _fit_sphere(new_pos)[1]\n assert_allclose(r0, [0.000775, 0.006881, 0.047398], atol=1e-3)\n # spot check\n assert_allclose(new_pos[:2], [[0.000273, 0.084920, 0.105838],\n [0.028822, 0.083529, 0.099164]], atol=atol)\n\n\n# XXX: this does not check ch_names + it cannot work because of write_dig\ndef _check_roundtrip(montage, fname, coord_frame='head'):\n \"\"\"Check roundtrip writing.\"\"\"\n montage.save(fname)\n montage_read = read_dig_fif(fname=fname)\n\n assert_equal(repr(montage), repr(montage_read))\n assert_equal(_check_get_coord_frame(montage_read.dig), coord_frame)\n assert_dig_allclose(montage, montage_read)\n\n\ndef _fake_montage(ch_names):\n pos = np.random.RandomState(42).randn(len(ch_names), 3)\n return make_dig_montage(ch_pos=dict(zip(ch_names, pos)),\n coord_frame='head')\n\n\ncnt_ignore_warns = [\n pytest.mark.filterwarnings(\n 'ignore:.*Could not parse meas date from the header. 
Setting to None.'\n ),\n pytest.mark.filterwarnings((\n 'ignore:.*Could not define the number of bytes automatically.'\n ' Defaulting to 2.')\n ),\n]\n\n\ndef test_digmontage_constructor_errors():\n \"\"\"Test proper error messaging.\"\"\"\n with pytest.raises(ValueError, match='does not match the number'):\n _ = DigMontage(ch_names=['foo', 'bar'], dig=list())\n\n\ndef test_transform_to_head_and_compute_dev_head_t():\n \"\"\"Test transform_to_head and compute_dev_head_t.\"\"\"\n EXPECTED_DEV_HEAD_T = \\\n [[-3.72201691e-02, -9.98212167e-01, -4.67667497e-02, -7.31583414e-04],\n [8.98064989e-01, -5.39382685e-02, 4.36543170e-01, 1.60134431e-02],\n [-4.38285221e-01, -2.57513699e-02, 8.98466990e-01, 6.13035748e-02],\n [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]\n\n EXPECTED_FID_IN_POLHEMUS = {\n 'nasion': np.array([0.001393, 0.0131613, -0.0046967]),\n 'lpa': np.array([-0.0624997, -0.0737271, 0.07996]),\n 'rpa': np.array([-0.0748957, 0.0873785, 0.0811943]),\n }\n\n EXPECTED_FID_IN_HEAD = {\n 'nasion': np.array([-8.94466792e-18, 1.10559624e-01, -3.85185989e-34]),\n 'lpa': np.array([-8.10816716e-02, 6.56321671e-18, 0]),\n 'rpa': np.array([8.05048781e-02, -6.47441364e-18, 0]),\n }\n\n hpi_dev = np.array(\n [[ 2.13951493e-02, 8.47444056e-02, -5.65431188e-02], # noqa\n [ 2.10299433e-02, -8.03141101e-02, -6.34420259e-02], # noqa\n [ 1.05916829e-01, 8.18485672e-05, 1.19928083e-02], # noqa\n [ 9.26595105e-02, 4.64804385e-02, 8.45141253e-03], # noqa\n [ 9.42554419e-02, -4.35206589e-02, 8.78999363e-03]] # noqa\n )\n\n hpi_polhemus = np.array(\n [[-0.0595004, -0.0704836, 0.075893 ], # noqa\n [-0.0646373, 0.0838228, 0.0762123], # noqa\n [-0.0135035, 0.0072522, -0.0268405], # noqa\n [-0.0202967, -0.0351498, -0.0129305], # noqa\n [-0.0277519, 0.0452628, -0.0222407]] # noqa\n )\n\n montage_polhemus = make_dig_montage(\n **EXPECTED_FID_IN_POLHEMUS, hpi=hpi_polhemus, coord_frame='unknown'\n )\n\n montage_meg = make_dig_montage(hpi=hpi_dev, coord_frame='meg')\n\n # Test regular worflow to get dev_head_t\n montage = montage_polhemus + montage_meg\n fids, _ = _get_fid_coords(montage.dig)\n for kk in fids:\n assert_allclose(fids[kk], EXPECTED_FID_IN_POLHEMUS[kk], atol=1e-5)\n\n with pytest.raises(ValueError, match='set to head coordinate system'):\n _ = compute_dev_head_t(montage)\n\n montage = transform_to_head(montage)\n\n fids, _ = _get_fid_coords(montage.dig)\n for kk in fids:\n assert_allclose(fids[kk], EXPECTED_FID_IN_HEAD[kk], atol=1e-5)\n\n dev_head_t = compute_dev_head_t(montage)\n assert_allclose(dev_head_t['trans'], EXPECTED_DEV_HEAD_T, atol=5e-7)\n\n # Test errors when number of HPI points do not match\n EXPECTED_ERR_MSG = 'Device-to-Head .*Got 0 .*device and 5 points in head'\n with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):\n _ = compute_dev_head_t(transform_to_head(montage_polhemus))\n\n EXPECTED_ERR_MSG = 'Device-to-Head .*Got 5 .*device and 0 points in head'\n with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):\n _ = compute_dev_head_t(transform_to_head(\n montage_meg + make_dig_montage(**EXPECTED_FID_IN_POLHEMUS)\n ))\n\n EXPECTED_ERR_MSG = 'Device-to-Head .*Got 3 .*device and 5 points in head'\n with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):\n _ = compute_dev_head_t(transform_to_head(\n DigMontage(dig=_format_dig_points(montage_meg.dig[:3])) +\n montage_polhemus\n ))\n\n\ndef test_set_montage_with_mismatching_ch_names():\n \"\"\"Test setting a DigMontage with mismatching ch_names.\"\"\"\n raw = read_raw_fif(fif_fname)\n montage = 
make_standard_montage('mgh60')\n\n # 'EEG 001' and 'EEG001' won't match\n missing_err = '60 channel positions not present'\n with pytest.raises(ValueError, match=missing_err):\n raw.set_montage(montage)\n\n montage.ch_names = [ # modify the names in place\n name.replace('EEG', 'EEG ') for name in montage.ch_names\n ]\n raw.set_montage(montage) # does not raise\n\n # Case sensitivity\n raw.rename_channels(lambda x: x.lower())\n with pytest.raises(ValueError, match=missing_err):\n raw.set_montage(montage)\n # should work\n raw.set_montage(montage, match_case=False)\n raw.rename_channels(lambda x: x.upper()) # restore\n assert 'EEG 001' in raw.ch_names and 'eeg 001' not in raw.ch_names\n raw.rename_channels({'EEG 002': 'eeg 001'})\n assert 'EEG 001' in raw.ch_names and 'eeg 001' in raw.ch_names\n raw.set_channel_types({'eeg 001': 'misc'})\n raw.set_montage(montage)\n raw.set_channel_types({'eeg 001': 'eeg'})\n with pytest.raises(ValueError, match='1 channel position not present'):\n raw.set_montage(montage)\n with pytest.raises(ValueError, match='match_case=False as 1 channel name'):\n raw.set_montage(montage, match_case=False)\n info = create_info(['EEG 001'], 1000., 'eeg')\n mon = make_dig_montage({'EEG 001': np.zeros(3), 'eeg 001': np.zeros(3)},\n nasion=[0, 1., 0], rpa=[1., 0, 0], lpa=[-1., 0, 0])\n info.set_montage(mon)\n with pytest.raises(ValueError, match='match_case=False as 1 montage name'):\n info.set_montage(mon, match_case=False)\n\n\ndef test_set_montage_with_sub_super_set_of_ch_names():\n \"\"\"Test info and montage ch_names matching criteria.\"\"\"\n N_CHANNELS = len('abcdef')\n montage = _make_toy_dig_montage(N_CHANNELS, coord_frame='head')\n\n # montage and info match\n info = create_info(ch_names=list('abcdef'), sfreq=1, ch_types='eeg')\n info.set_montage(montage)\n\n # montage is a SUPERset of info\n info = create_info(list('abc'), sfreq=1, ch_types='eeg')\n info.set_montage(montage)\n assert len(info['dig']) == len(list('abc'))\n\n # montage is a SUBset of info\n _MSG = 'subset of info. 
There are 2 .* not present in the DigMontage'\n info = create_info(ch_names=list('abcdfgh'), sfreq=1, ch_types='eeg')\n with pytest.raises(ValueError, match=_MSG) as exc:\n info.set_montage(montage)\n # plus suggestions\n assert exc.match('set_channel_types')\n assert exc.match('on_missing')\n\n\ndef test_heterogeneous_ch_type():\n \"\"\"Test ch_names matching criteria with heterogeneous ch_type.\"\"\"\n VALID_MONTAGE_NAMED_CHS = ('eeg', 'ecog', 'seeg')\n\n montage = _make_toy_dig_montage(\n n_channels=len(VALID_MONTAGE_NAMED_CHS),\n coord_frame='head',\n )\n\n # Montage and info match\n info = create_info(montage.ch_names, 1., list(VALID_MONTAGE_NAMED_CHS))\n RawArray(np.zeros((3, 1)), info, copy=None).set_montage(montage)\n\n\ndef test_set_montage_coord_frame_in_head_vs_unknown():\n \"\"\"Test set montage using head and unknown only.\"\"\"\n N_CHANNELS, NaN = 3, np.nan\n\n raw = _make_toy_raw(N_CHANNELS)\n montage_in_head = _make_toy_dig_montage(N_CHANNELS, coord_frame='head')\n montage_in_unknown = _make_toy_dig_montage(\n N_CHANNELS, coord_frame='unknown'\n )\n montage_in_unknown_with_fid = _make_toy_dig_montage(\n N_CHANNELS, coord_frame='unknown',\n nasion=[0, 1, 0], lpa=[1, 0, 0], rpa=[-1, 0, 0],\n )\n\n assert_allclose(\n actual=np.array([ch['loc'] for ch in raw.info['chs']]),\n desired=np.full((N_CHANNELS, 12), np.nan)\n )\n\n raw.set_montage(montage_in_head)\n assert_allclose(\n actual=np.array([ch['loc'] for ch in raw.info['chs']]),\n desired=[\n [0., 1., 2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n [3., 4., 5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n [6., 7., 8., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n ]\n )\n\n with pytest.warns(RuntimeWarning, match='assuming identity'):\n raw.set_montage(montage_in_unknown)\n\n raw.set_montage(montage_in_unknown_with_fid)\n assert_allclose(\n actual=np.array([ch['loc'] for ch in raw.info['chs']]),\n desired=[\n [-0., 1., -2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n [-3., 4., -5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n [-6., 7., -8., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n ]\n )\n\n # check no collateral effects from transforming montage\n assert _check_get_coord_frame(montage_in_unknown_with_fid.dig) == 'unknown'\n assert_array_equal(\n _get_dig_montage_pos(montage_in_unknown_with_fid),\n [[0, 1, 2], [3, 4, 5], [6, 7, 8]],\n )\n\n\ndef test_set_montage_with_missing_coordinates():\n \"\"\"Test set montage with missing coordinates.\"\"\"\n N_CHANNELS, NaN = 3, np.nan\n\n raw = _make_toy_raw(N_CHANNELS)\n raw.set_channel_types({ch: 'ecog' for ch in raw.ch_names})\n # don't include all the channels\n ch_names = raw.ch_names[1:]\n n_channels = len(ch_names)\n ch_coords = np.arange(n_channels * 3).reshape(n_channels, 3)\n montage_in_mri = make_dig_montage(\n ch_pos=dict(zip(ch_names, ch_coords,)),\n coord_frame='unknown',\n nasion=[0, 1, 0], lpa=[1, 0, 0], rpa=[-1, 0, 0],\n )\n\n with pytest.raises(ValueError, match='DigMontage is '\n 'only a subset of info'):\n raw.set_montage(montage_in_mri)\n\n with pytest.raises(ValueError, match='Invalid value'):\n raw.set_montage(montage_in_mri, on_missing='foo')\n\n with pytest.raises(TypeError, match='must be an instance'):\n raw.set_montage(montage_in_mri, on_missing=True)\n\n with pytest.warns(RuntimeWarning, match='DigMontage is '\n 'only a subset of info'):\n raw.set_montage(montage_in_mri, on_missing='warn')\n\n raw.set_montage(montage_in_mri, on_missing='ignore')\n assert_allclose(\n actual=np.array([ch['loc'] for ch in raw.info['chs']]),\n desired=[\n [NaN, NaN, NaN, 
NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN],\n [0., 1., -2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n [-3., 4., -5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],\n ]\n )\n\n\[email protected]_testing_data\ndef test_get_montage():\n \"\"\"Test get montage from Instance.\n\n Test with standard montage and then loaded in montage.\n \"\"\"\n # 1. read in testing data and assert montage roundtrip\n # for testing dataset: 'test_raw.fif'\n raw = read_raw_fif(fif_fname)\n raw = raw.rename_channels(lambda name: name.replace('EEG ', 'EEG'))\n raw2 = raw.copy()\n # get montage and then set montage and\n # it should be the same\n montage = raw.get_montage()\n raw.set_montage(montage, on_missing='raise')\n test_montage = raw.get_montage()\n assert_object_equal(raw.info['chs'], raw2.info['chs'])\n assert_dig_allclose(raw2.info, raw.info)\n assert_object_equal(raw2.info['dig'], raw.info['dig'])\n\n # the montage does not change\n assert_object_equal(montage.dig, test_montage.dig)\n\n # the montage should fulfill a roundtrip with make_dig_montage\n test2_montage = make_dig_montage(**montage.get_positions())\n assert_object_equal(test2_montage.dig, test_montage.dig)\n\n # 2. now do a standard montage\n montage = make_standard_montage('mgh60')\n # set the montage; note renaming to make standard montage map\n raw.set_montage(montage)\n\n # get montage back and set it\n # the channel locations should be the same\n raw2 = raw.copy()\n test_montage = raw.get_montage()\n raw.set_montage(test_montage, on_missing='ignore')\n\n # the montage should fulfill a roundtrip with make_dig_montage\n test2_montage = make_dig_montage(**test_montage.get_positions())\n assert_object_equal(test2_montage.dig, test_montage.dig)\n\n # chs should not change\n assert_object_equal(raw2.info['chs'], raw.info['chs'])\n # dig order might be different after set_montage\n assert montage.ch_names == test_montage.ch_names\n # note that test_montage will have different coordinate frame\n # compared to standard montage\n assert_dig_allclose(raw2.info, raw.info)\n assert_object_equal(raw2.info['dig'], raw.info['dig'])\n\n # 3. if montage gets set to None\n raw.set_montage(None)\n assert raw.get_montage() is None\n\n # 4. 
read in BV test dataset and make sure montage\n # fulfills roundtrip on non-standard montage\n dig_montage = read_dig_fif(fif_dig_montage_fname)\n\n # Make a BrainVision file like the one the user would have had\n # with testing dataset 'test.vhdr'\n raw_bv = read_raw_brainvision(bv_fname, preload=True)\n raw_bv_2 = raw_bv.copy()\n\n # rename channels to make it have the full set\n # of channels\n mapping = dict()\n for ii, ch_name in enumerate(raw_bv.ch_names):\n mapping[ch_name] = 'EEG%03d' % (ii + 1,)\n raw_bv.rename_channels(mapping)\n for ii, ch_name in enumerate(raw_bv_2.ch_names):\n mapping[ch_name] = 'EEG%03d' % (ii + 33,)\n raw_bv_2.rename_channels(mapping)\n raw_bv.add_channels([raw_bv_2])\n for ch in raw_bv.info['chs']:\n ch['kind'] = FIFF.FIFFV_EEG_CH\n\n # Set the montage and roundtrip\n raw_bv.set_montage(dig_montage)\n raw_bv2 = raw_bv.copy()\n\n # reset the montage\n test_montage = raw_bv.get_montage()\n raw_bv.set_montage(test_montage, on_missing='ignore')\n # dig order might be different after set_montage\n assert_object_equal(raw_bv2.info['dig'], raw_bv.info['dig'])\n assert_dig_allclose(raw_bv2.info, raw_bv.info)\n\n # if dig is not set in the info, then montage returns None\n raw.info['dig'] = None\n assert raw.get_montage() is None\n\n # the montage should fulfill a roundtrip with make_dig_montage\n test2_montage = make_dig_montage(**test_montage.get_positions())\n assert_object_equal(test2_montage.dig, test_montage.dig)\n\n\ndef test_read_dig_hpts():\n \"\"\"Test reading .hpts file (from MNE legacy).\"\"\"\n fname = op.join(\n op.dirname(_BRAINVISON_FILE), 'tests', 'data', 'test.hpts'\n )\n\n montage = read_dig_hpts(fname)\n assert repr(montage) == (\n '<DigMontage | '\n '0 extras (headshape), 5 HPIs, 3 fiducials, 34 channels>'\n )\n\n\ndef test_get_builtin_montages():\n \"\"\"Test help function to obtain builtin montages.\"\"\"\n EXPECTED_NUM = 24\n assert len(get_builtin_montages()) == EXPECTED_NUM\n\n\[email protected]_testing_data\ndef test_plot_montage():\n \"\"\"Test plotting montage.\"\"\"\n # gh-8025\n montage = read_dig_captrak(bvct_dig_montage_fname)\n montage.plot()\n plt.close('all')\n\n\nrun_tests_if_main()\n"
] | [
[
"numpy.eye",
"numpy.empty",
"numpy.zeros",
"numpy.testing.assert_equal",
"numpy.stack",
"numpy.testing.assert_array_equal",
"numpy.random.RandomState",
"numpy.arange",
"matplotlib.pyplot.close",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.full"
]
] |
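The MNE test module above keeps exercising one core pattern: build a DigMontage from named channel positions plus fiducials with make_dig_montage, then attach it to measurement info via set_montage. A minimal round-trip sketch of that pattern, assuming mne and numpy are installed (the three channel names and their coordinates are toy values, mirroring the tests):

import numpy as np
import mne

# Toy montage: three channels at arbitrary head-frame coordinates.
ch_names = ['a', 'b', 'c']
ch_pos = dict(zip(ch_names, np.arange(9.).reshape(3, 3)))
montage = mne.channels.make_dig_montage(
    ch_pos=ch_pos,
    nasion=[0., 1., 0.], lpa=[-1., 0., 0.], rpa=[1., 0., 0.],
    coord_frame='head')

info = mne.create_info(ch_names, sfreq=1., ch_types='eeg')
info.set_montage(montage)
# Channel locations now live in info['chs'][i]['loc'], which is exactly
# what the assert_allclose checks in the tests above inspect.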
NYC00kie/PhysWikiQuiz | [
"4243fd6fa6f23670b9743b6a2c79339a9f3d32fc"
] | [
"module_unit_tests.py"
] | [
"import unittest\n\nfrom old import module0_formula_and_identifier_retrieval\n\nimport pandas as pd\n\n# Python Tutorial: Unit Testing Your Code with the unittest Module:\n#https://www.youtube.com/watch?v=6tNS--WetLI\n\n# Retrieve sample QIDs\nsample_IDs_filepath = r'evaluation\\sample_IDs.csv'\nQIDs_column_name = 'QID'\n\ndef get_sample_QIDs():\n sample_IDs_table = pd.read_csv(sample_IDs_filepath,delimiter=';')\n sample_QIDs = list(sample_IDs_table[QIDs_column_name])\n return sample_QIDs\n\nclass TestModules(unittest.TestCase):\n\n # TEST MODULE0\n def test_module0(self):\n #qid = 'Q11376'\n #sample_QIDs = [qid]\n sample_QIDs = get_sample_QIDs()\n for qid in sample_QIDs:\n Wikidata_item = module0_formula_and_identifier_retrieval \\\n .get_Wikidata_item(qid)\n\n self.assertIsNotNone(Wikidata_item)\n #self.assertIsNotNone(module0_formula_and_identifier_retrieval\n # .get_concept_name(Wikidata_item))\n\n # TEST MODULE1\n def test_module1(self):\n # TODO: insert code for unit test of module1 here\n pass\n\nif __name__ == '__main__':\n unittest.main()"
] | [
[
"pandas.read_csv"
]
] |
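One detail worth noting about the loop in test_module0 above: asserting inside a bare for-loop aborts at the first failing QID without reporting which one broke. unittest's subTest context manager attributes each failure to its iteration. A sketch under the same import as the file (the single hard-coded QID is the one left commented out in the original; the real test reads QIDs from the CSV):

import unittest

from old import module0_formula_and_identifier_retrieval

class TestModule0(unittest.TestCase):
    def test_get_wikidata_item(self):
        for qid in ['Q11376']:  # hypothetical sample QID
            with self.subTest(qid=qid):  # failures are reported per QID
                item = module0_formula_and_identifier_retrieval.get_Wikidata_item(qid)
                self.assertIsNotNone(item)

if __name__ == '__main__':
    unittest.main()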
bertelschmitt/multistreamYOLO | [
"827a1d2ae11653fe5fde2cee3b52cda8baae9899"
] | [
"2_Training/Train_YOLO.py"
] | [
"\n\"\"\"\nMODIFIED FROM keras-yolo3 PACKAGE, https://github.com/qqwweee/keras-yolo3\nRetrain the YOLO model for your own dataset.\n\n10-26-20 MODIFIED by bertelschmitt to use new repo name if changed to something else than \"TrainYourOwnYOLO\"\n10-31-20 UPDATED by bertelschmitt to reflect TrainYourOwnYOLO versions as of 10-31-20\n\"\"\"\n\nimport os\nimport sys\nimport argparse\nimport warnings\n\n\ndef get_parent_dir(n=1):\n \"\"\"returns the n-th parent dicrectory of the current\n working directory\"\"\"\n current_path = os.path.dirname(os.path.abspath(__file__))\n for _ in range(n):\n current_path = os.path.dirname(current_path)\n return current_path\n\n\nsrc_path = os.path.join(get_parent_dir(0), \"src\")\nsys.path.append(src_path)\n\nutils_path = os.path.join(get_parent_dir(1), \"Utils\")\nsys.path.append(utils_path)\n\nimport numpy as np\nimport keras.backend as K\nfrom keras.layers import Input, Lambda\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.callbacks import (\n TensorBoard,\n ModelCheckpoint,\n ReduceLROnPlateau,\n EarlyStopping,\n)\nfrom keras_yolo3.yolo3.model import (\n preprocess_true_boxes,\n yolo_body,\n tiny_yolo_body,\n yolo_loss,\n)\nfrom keras_yolo3.yolo3.utils import get_random_data\nfrom PIL import Image\nfrom time import time\nimport tensorflow.compat.v1 as tf\nimport pickle\n\nfrom Train_Utils import (\n get_classes,\n get_anchors,\n create_model,\n create_tiny_model,\n data_generator,\n data_generator_wrapper,\n ChangeToOtherMachine,\n)\n\n\nkeras_path = os.path.join(src_path, \"keras_yolo3\")\nData_Folder = os.path.join(get_parent_dir(1), \"Data\")\nImage_Folder = os.path.join(Data_Folder, \"Source_Images\", \"Training_Images\")\nVoTT_Folder = os.path.join(Image_Folder, \"vott-csv-export\")\nYOLO_filename = os.path.join(VoTT_Folder, \"data_train.txt\")\n\nModel_Folder = os.path.join(Data_Folder, \"Model_Weights\")\nYOLO_classname = os.path.join(Model_Folder, \"data_classes.txt\")\n\nlog_dir = Model_Folder\nanchors_path = os.path.join(keras_path, \"model_data\", \"yolo_anchors.txt\")\nweights_path = os.path.join(keras_path, \"yolo.h5\")\n#10-26-20 get name of current repo, which should be the directory one down from ours\ncurrent_repo = get_parent_dir(1).rsplit('/', 1)[1]\nFLAGS = None\n\nif __name__ == \"__main__\":\n # Delete all default flags\n parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)\n \"\"\"\n Command line options\n \"\"\"\n\n parser.add_argument(\n \"--annotation_file\",\n type=str,\n default=YOLO_filename,\n help=\"Path to annotation file for Yolo. Default is \" + YOLO_filename,\n )\n parser.add_argument(\n \"--classes_file\",\n type=str,\n default=YOLO_classname,\n help=\"Path to YOLO classnames. Default is \" + YOLO_classname,\n )\n\n parser.add_argument(\n \"--log_dir\",\n type=str,\n default=log_dir,\n help=\"Folder to save training logs and trained weights to. Default is \"\n + log_dir,\n )\n\n parser.add_argument(\n \"--anchors_path\",\n type=str,\n default=anchors_path,\n help=\"Path to YOLO anchors. Default is \" + anchors_path,\n )\n\n parser.add_argument(\n \"--weights_path\",\n type=str,\n default=weights_path,\n help=\"Path to pre-trained YOLO weights. Default is \" + weights_path,\n )\n parser.add_argument(\n \"--val_split\",\n type=float,\n default=0.1,\n help=\"Percentage of training set to be used for validation. 
Default is 10%.\",\n )\n parser.add_argument(\n \"--is_tiny\",\n default=False,\n action=\"store_true\",\n help=\"Use the tiny Yolo version for better performance and less accuracy. Default is False.\",\n )\n parser.add_argument(\n \"--random_seed\",\n type=float,\n default=None,\n help=\"Random seed value to make script deterministic. Default is 'None', i.e. non-deterministic.\",\n )\n parser.add_argument(\n \"--epochs\",\n type=int,\n default=51,\n help=\"Number of epochs for training last layers and number of epochs for fine-tuning layers. Default is 51.\",\n )\n parser.add_argument(\n \"--warnings\",\n default=False,\n action=\"store_true\",\n help=\"Display warning messages. Default is False.\",\n )\n\n FLAGS = parser.parse_args()\n\n if not FLAGS.warnings:\n tf.logging.set_verbosity(tf.logging.ERROR)\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n warnings.filterwarnings(\"ignore\")\n\n #Backported w/o change 10/31/20 from TrainYourOwnYOLO version as of 10/31/20 by BS\n\t# Get WandB integration if setup\n try:\n import wandb\n from wandb.integration.keras import WandbCallback # type: ignore\n\n wandb.ensure_configured()\n if wandb.api.api_key is None:\n _has_wandb = False\n wandb.termwarn(\n \"W&B installed but not logged in. Run `wandb login` or set the WANDB_API_KEY env variable.\"\n )\n else:\n _has_wandb = False if os.getenv(\"WANDB_DISABLED\") else True\n except (ImportError, AttributeError):\n _has_wandb = False\n\t\t\t\t\t\t\t\t\n np.random.seed(FLAGS.random_seed)\n\n log_dir = FLAGS.log_dir\n\n class_names = get_classes(FLAGS.classes_file)\n num_classes = len(class_names)\n\n if FLAGS.is_tiny and FLAGS.weights_path == weights_path:\n weights_path = os.path.join(os.path.dirname(FLAGS.weights_path), \"yolo-tiny.h5\")\n if FLAGS.is_tiny and FLAGS.anchors_path == anchors_path:\n anchors_path = os.path.join(\n os.path.dirname(FLAGS.anchors_path), \"yolo-tiny_anchors.txt\"\n )\n anchors = get_anchors(anchors_path)\n\n input_shape = (416, 416) # multiple of 32, height, width\n epoch1, epoch2 = FLAGS.epochs, FLAGS.epochs\n\n is_tiny_version = len(anchors) == 6 # default setting\n if FLAGS.is_tiny:\n model = create_tiny_model(\n input_shape, anchors, num_classes, freeze_body=2, weights_path=weights_path\n )\n else:\n model = create_model(\n input_shape, anchors, num_classes, freeze_body=2, weights_path=weights_path\n ) # make sure you know what you freeze\n\n log_dir_time = os.path.join(log_dir, \"{}\".format(int(time())))\n logging = TensorBoard(log_dir=log_dir_time)\n checkpoint = ModelCheckpoint(\n os.path.join(log_dir, \"checkpoint.h5\"),\n monitor=\"val_loss\",\n save_weights_only=True,\n save_best_only=True,\n period=5,\n )\n reduce_lr = ReduceLROnPlateau(monitor=\"val_loss\", factor=0.1, patience=3, verbose=1)\n early_stopping = EarlyStopping(\n monitor=\"val_loss\", min_delta=0, patience=10, verbose=1\n )\n\n val_split = FLAGS.val_split\n with open(FLAGS.annotation_file) as f:\n lines = f.readlines()\n\n # This step makes sure that the path names correspond to the local machine\n # This is important if annotation and training are done on different machines (e.g. 
training on AWS)\n\t# 10-26-20 Changed by bertelschmitt to call with current_repo\n lines = ChangeToOtherMachine(lines, remote_machine=\"\", repo=current_repo)\n np.random.shuffle(lines)\n num_val = int(len(lines) * val_split)\n num_train = len(lines) - num_val\n\t\n\t# From here on down, all backported w/o change 10/31/20 from TrainYourOwnYOLO version as of 10/31/20 by BS \n # Train with frozen layers first, to get a stable loss.\n # Adjust num epochs to your dataset. This step is enough to obtain a decent model.\n frozen_callbacks = [logging, checkpoint]\n\n if _has_wandb:\n wandb.init(\n project=\"TrainYourOwnYOLO\", config=vars(FLAGS), sync_tensorboard=False\n )\n wandb_callback = WandbCallback(save_model=False)\n frozen_callbacks.append(wandb_callback)\n\n model.compile(\n optimizer=Adam(lr=1e-3),\n loss={\n # use custom yolo_loss Lambda layer.\n \"yolo_loss\": lambda y_true, y_pred: y_pred\n },\n )\n\n batch_size = 32\n print(\n \"Train on {} samples, val on {} samples, with batch size {}.\".format(\n num_train, num_val, batch_size\n )\n )\n history = model.fit_generator(\n data_generator_wrapper(\n lines[:num_train], batch_size, input_shape, anchors, num_classes\n ),\n steps_per_epoch=max(1, num_train // batch_size),\n validation_data=data_generator_wrapper(\n lines[num_train:], batch_size, input_shape, anchors, num_classes\n ),\n validation_steps=max(1, num_val // batch_size),\n epochs=epoch1,\n initial_epoch=0,\n callbacks=frozen_callbacks,\n )\n model.save_weights(os.path.join(log_dir, \"trained_weights_stage_1.h5\"))\n\n # Unfreeze and continue training, to fine-tune.\n # Train longer if the result is unsatisfactory.\n\n full_callbacks = [logging, checkpoint, reduce_lr, early_stopping]\n\n if _has_wandb:\n full_callbacks.append(wandb_callback)\n\n for i in range(len(model.layers)):\n model.layers[i].trainable = True\n model.compile(\n optimizer=Adam(lr=1e-4), loss={\"yolo_loss\": lambda y_true, y_pred: y_pred}\n ) # recompile to apply the change\n\n print(\"Unfreeze all layers.\")\n\n batch_size = 4 # note that more GPU memory is required after unfreezing the body\n print(\n \"Train on {} samples, val on {} samples, with batch size {}.\".format(\n num_train, num_val, batch_size\n )\n )\n history = model.fit_generator(\n data_generator_wrapper(\n lines[:num_train], batch_size, input_shape, anchors, num_classes\n ),\n steps_per_epoch=max(1, num_train // batch_size),\n validation_data=data_generator_wrapper(\n lines[num_train:], batch_size, input_shape, anchors, num_classes\n ),\n validation_steps=max(1, num_val // batch_size),\n epochs=epoch1 + epoch2,\n initial_epoch=epoch1,\n callbacks=full_callbacks,\n )\n model.save_weights(os.path.join(log_dir, \"trained_weights_final.h5\"))\n"
] | [
[
"numpy.random.shuffle",
"numpy.random.seed",
"tensorflow.compat.v1.logging.set_verbosity"
]
] |
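The training script above hinges on a two-stage idiom: fit with the network body frozen at lr=1e-3, then set every layer trainable and recompile (Keras only applies trainability changes at compile time) before fine-tuning at lr=1e-4. A self-contained toy sketch of that idiom, using a dummy two-layer model and random data rather than the YOLO network:

import numpy as np
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import Adam

model = Sequential([Dense(4, input_shape=(2,), name='body'),
                    Dense(1, name='head')])
x, y = np.random.rand(8, 2), np.random.rand(8, 1)

# Stage 1: freeze the body, train only the head at the larger rate.
model.get_layer('body').trainable = False
model.compile(optimizer=Adam(lr=1e-3), loss='mse')
model.fit(x, y, epochs=1, verbose=0)

# Stage 2: unfreeze everything, then recompile so the change takes effect.
for layer in model.layers:
    layer.trainable = True
model.compile(optimizer=Adam(lr=1e-4), loss='mse')
model.fit(x, y, epochs=1, verbose=0)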
limeng357/Paddle | [
"dbd25805c88c48998eb9dc0f4b2ca1fd46326482"
] | [
"python/paddle/fluid/tests/unittests/test_bipartite_match_op.py"
] | [
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\n\n\ndef bipartite_match(distance, match_indices, match_dist):\n \"\"\"Bipartite Matching algorithm.\n Arg:\n distance (numpy.array) : The distance of two entries with shape [M, N].\n match_indices (numpy.array): the matched indices from column to row\n with shape [1, N], it must be initialized to -1.\n match_dist (numpy.array): The matched distance from column to row\n with shape [1, N], it must be initialized to 0.\n \"\"\"\n match_pair = []\n row, col = distance.shape\n for i in range(row):\n for j in range(col):\n match_pair.append((i, j, distance[i][j]))\n\n match_sorted = sorted(match_pair, key=lambda tup: tup[2], reverse=True)\n\n row_indices = -1 * np.ones((row, ), dtype=np.int)\n\n idx = 0\n for i, j, dist in match_sorted:\n if idx >= row:\n break\n if match_indices[j] == -1 and row_indices[i] == -1 and dist > 0:\n match_indices[j] = i\n row_indices[i] = j\n match_dist[j] = dist\n idx += 1\n\n\ndef argmax_match(distance, match_indices, match_dist, threshold):\n r, c = distance.shape\n for j in xrange(c):\n if match_indices[j] != -1:\n continue\n col_dist = distance[:, j]\n indices = np.argwhere(col_dist >= threshold).flatten()\n if len(indices) < 1:\n continue\n match_indices[j] = indices[np.argmax(col_dist[indices])]\n match_dist[j] = col_dist[match_indices[j]]\n\n\ndef batch_bipartite_match(distance, lod, match_type=None, dist_threshold=None):\n \"\"\"Bipartite Matching algorithm for batch input.\n Arg:\n distance (numpy.array) : The distance of two entries with shape [M, N].\n lod (list of int): The offsets of each input in this batch.\n \"\"\"\n n = len(lod) - 1\n m = distance.shape[1]\n match_indices = -1 * np.ones((n, m), dtype=np.int)\n match_dist = np.zeros((n, m), dtype=np.float32)\n for i in range(len(lod) - 1):\n bipartite_match(distance[lod[i]:lod[i + 1], :], match_indices[i, :],\n match_dist[i, :])\n if match_type == 'per_prediction':\n argmax_match(distance[lod[i]:lod[i + 1], :], match_indices[i, :],\n match_dist[i, :], dist_threshold)\n return match_indices, match_dist\n\n\nclass TestBipartiteMatchOpWithLoD(OpTest):\n def setUp(self):\n self.op_type = 'bipartite_match'\n lod = [[0, 5, 11, 23]]\n dist = np.random.random((23, 217)).astype('float32')\n match_indices, match_dist = batch_bipartite_match(dist, lod[0])\n\n self.inputs = {'DistMat': (dist, lod)}\n self.outputs = {\n 'ColToRowMatchIndices': match_indices,\n 'ColToRowMatchDist': match_dist,\n }\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestBipartiteMatchOpWithoutLoD(OpTest):\n def setUp(self):\n self.op_type = 'bipartite_match'\n lod = [[0, 8]]\n dist = np.random.random((8, 17)).astype('float32')\n match_indices, match_dist = batch_bipartite_match(dist, lod[0])\n\n self.inputs = {'DistMat': dist}\n self.outputs = {\n 'ColToRowMatchIndices': match_indices,\n 'ColToRowMatchDist': match_dist,\n }\n\n def 
test_check_output(self):\n self.check_output()\n\n\nclass TestBipartiteMatchOpWithPerPredictionType(OpTest):\n def setUp(self):\n self.op_type = 'bipartite_match'\n lod = [[0, 5, 11, 23]]\n dist = np.random.random((23, 237)).astype('float32')\n match_indices, match_dist = batch_bipartite_match(dist, lod[0],\n 'per_prediction', 0.5)\n\n self.inputs = {'DistMat': (dist, lod)}\n self.outputs = {\n 'ColToRowMatchIndices': match_indices,\n 'ColToRowMatchDist': match_dist,\n }\n self.attrs = {\n 'match_type': 'per_prediction',\n 'dist_threshold': 0.5,\n }\n\n def test_check_output(self):\n self.check_output()\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.ones",
"numpy.argwhere",
"numpy.zeros",
"numpy.argmax",
"numpy.random.random"
]
] |
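The pure-NumPy bipartite_match helper defined in the test above is greedy: it sorts all (row, column) pairs by distance, descending, and claims each row and column at most once. A small usage sketch, given that helper (plain int stands in for the np.int alias used in the original, since np.int is removed in recent NumPy):

import numpy as np

rng = np.random.RandomState(0)
distance = rng.random_sample((4, 6)).astype('float32')  # 4 rows, 6 columns
match_indices = -1 * np.ones((6,), dtype=int)   # -1 marks an unmatched column
match_dist = np.zeros((6,), dtype=np.float32)

bipartite_match(distance, match_indices, match_dist)
# At most min(4, 6) columns end up matched; match_indices[j] is the row
# greedily paired with column j, and match_dist[j] its distance.
print(match_indices, match_dist)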
Felix-neko/catalyst | [
"df80986f1c12ef6a3776637453a0c04aaef0068c"
] | [
"catalyst/rl/scripts/run_samplers.py"
] | [
"#!/usr/bin/env python\n\nimport os\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nos.environ[\"MKL_NUM_THREADS\"] = \"1\"\n\nimport copy # noqa E402\nimport time # noqa E402\nimport atexit # noqa E402\nimport argparse # noqa E402\nimport multiprocessing as mp # noqa E402\n\nimport torch # noqa E402\ntorch.set_num_threads(1)\n\nfrom catalyst.rl.core import Sampler, ValidSampler, \\\n ExplorationHandler # noqa E402\nfrom catalyst.rl.registry import \\\n OFFPOLICY_ALGORITHMS, ONPOLICY_ALGORITHMS, \\\n ENVIRONMENTS, DATABASES # noqa E402\nfrom catalyst.rl.scripts.misc import OFFPOLICY_ALGORITHMS_NAMES, \\\n ONPOLICY_ALGORITHMS_NAMES # noqa E402\nfrom catalyst.utils.config import parse_args_uargs # noqa E402\nfrom catalyst.utils import set_global_seed, boolean_flag # noqa E402\nfrom catalyst.utils.scripts import import_module # noqa E402\n\n\ndef build_args(parser):\n parser.add_argument(\n \"--config\",\n \"--configs\",\n \"-C\",\n nargs=\"+\",\n help=\"path to config/configs\",\n metavar=\"CONFIG_PATH\",\n dest=\"configs\",\n required=True\n )\n parser.add_argument(\"--expdir\", type=str, default=None)\n parser.add_argument(\"--logdir\", type=str, default=None)\n parser.add_argument(\"--resume\", type=str, default=None)\n parser.add_argument(\"--seed\", type=int, default=42)\n\n parser.add_argument(\"--train\", type=int, default=None)\n parser.add_argument(\"--valid\", type=int, default=None)\n parser.add_argument(\"--infer\", type=int, default=None)\n parser.add_argument(\"--vis\", type=int, default=None)\n\n boolean_flag(parser, \"check\", default=False)\n boolean_flag(parser, \"db\", default=True)\n\n parser.add_argument(\"--run-delay\", type=int, default=1)\n boolean_flag(parser, \"daemon\", default=True)\n parser.add_argument(\"--sampler-id\", type=int, default=0)\n\n return parser\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n build_args(parser)\n args, unknown_args = parser.parse_known_args()\n return args, unknown_args\n\n\ndef run_sampler(\n *,\n config,\n logdir,\n algorithm_fn,\n environment_fn,\n visualize,\n mode,\n seed=42,\n id=None,\n resume=None,\n db=True,\n exploration_power=1.0,\n sync_epoch=False\n):\n config_ = copy.deepcopy(config)\n id = 0 if id is None else id\n seed = seed + id\n set_global_seed(seed)\n\n db_server = DATABASES.get_from_params(\n **config.get(\"db\", {}), sync_epoch=sync_epoch\n ) if db else None\n\n env = environment_fn(\n **config_[\"environment\"],\n visualize=visualize,\n mode=mode,\n sampler_id=id,\n )\n agent = algorithm_fn.prepare_for_sampler(env_spec=env, config=config_)\n\n exploration_params = config_[\"sampler\"].pop(\"exploration_params\", None)\n exploration_handler = ExplorationHandler(env=env, *exploration_params) \\\n if exploration_params is not None \\\n else None\n if exploration_handler is not None:\n exploration_handler.set_power(exploration_power)\n\n seeds = dict(\n (k, config_[\"sampler\"].pop(f\"{k}_seeds\", None))\n for k in [\"train\", \"valid\", \"infer\"]\n )\n seeds = seeds[mode]\n\n if algorithm_fn in OFFPOLICY_ALGORITHMS.values():\n weights_sync_mode = \"critic\" if env.discrete_actions else \"actor\"\n elif algorithm_fn in ONPOLICY_ALGORITHMS.values():\n weights_sync_mode = \"actor\"\n else:\n # @TODO: add registry for algorithms, trainers, samplers\n raise NotImplementedError()\n\n if mode in [\"valid\"]:\n sampler_fn = ValidSampler\n else:\n sampler_fn = Sampler\n\n sampler = sampler_fn(\n agent=agent,\n env=env,\n db_server=db_server,\n exploration_handler=exploration_handler,\n logdir=logdir,\n id=id,\n 
mode=mode,\n weights_sync_mode=weights_sync_mode,\n seeds=seeds,\n **config_[\"sampler\"],\n )\n\n if resume is not None:\n sampler.load_checkpoint(filepath=resume)\n\n sampler.run()\n\n\ndef main(args, unknown_args):\n args, config = parse_args_uargs(args, unknown_args)\n\n args.vis = args.vis or 0\n args.infer = args.infer or 0\n args.valid = args.valid or 0\n args.train = args.train or 0\n\n if args.expdir is not None:\n module = import_module(expdir=args.expdir) # noqa: F841\n\n environment_name = config[\"environment\"].pop(\"environment\")\n environment_fn = ENVIRONMENTS.get(environment_name)\n\n algorithm_name = config[\"algorithm\"].pop(\"algorithm\")\n\n if algorithm_name in OFFPOLICY_ALGORITHMS_NAMES:\n ALGORITHMS = OFFPOLICY_ALGORITHMS\n sync_epoch = False\n elif algorithm_name in ONPOLICY_ALGORITHMS_NAMES:\n ALGORITHMS = ONPOLICY_ALGORITHMS\n sync_epoch = True\n else:\n raise NotImplementedError()\n\n algorithm_fn = ALGORITHMS.get(algorithm_name)\n\n processes = []\n sampler_id = args.sampler_id\n\n def on_exit():\n for p in processes:\n p.terminate()\n\n atexit.register(on_exit)\n\n params = dict(\n seed=args.seed,\n logdir=args.logdir,\n algorithm_fn=algorithm_fn,\n environment_fn=environment_fn,\n config=config,\n resume=args.resume,\n db=args.db,\n sync_epoch=sync_epoch\n )\n\n if args.check:\n mode = \"train\"\n mode = \"valid\" if (args.valid is not None and args.valid > 0) else mode\n mode = \"infer\" if (args.infer is not None and args.infer > 0) else mode\n params_ = dict(\n visualize=(args.vis is not None and args.vis > 0),\n mode=mode,\n id=sampler_id\n )\n run_sampler(**params, **params_)\n\n for i in range(args.vis):\n params_ = dict(\n visualize=True, mode=\"infer\", id=sampler_id, exploration_power=0.0\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for i in range(args.infer):\n params_ = dict(\n visualize=False,\n mode=\"infer\",\n id=sampler_id,\n exploration_power=0.0\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for i in range(args.valid):\n params_ = dict(\n visualize=False,\n mode=\"valid\",\n id=sampler_id,\n exploration_power=0.0\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for i in range(1, args.train + 1):\n exploration_power = i / args.train\n params_ = dict(\n visualize=False,\n mode=\"train\",\n id=sampler_id,\n exploration_power=exploration_power\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for p in processes:\n p.join()\n\n\nif __name__ == \"__main__\":\n args, unknown_args = parse_args()\n main(args, unknown_args)\n"
] | [
[
"torch.set_num_threads"
]
] |
jahau/addons | [
"11b842781b0f022830f35f2e6ee1cc93c80abe50"
] | [
"tensorflow_addons/image/interpolate_spline.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Polyharmonic spline interpolation.\"\"\"\n\nimport tensorflow as tf\n\nEPSILON = 0.0000000001\n\n\ndef _cross_squared_distance_matrix(x, y):\n \"\"\"Pairwise squared distance between two (batch) matrices' rows (2nd dim).\n\n Computes the pairwise distances between rows of x and rows of y\n Args:\n x: [batch_size, n, d] float `Tensor`\n y: [batch_size, m, d] float `Tensor`\n\n Returns:\n squared_dists: [batch_size, n, m] float `Tensor`, where\n squared_dists[b,i,j] = ||x[b,i,:] - y[b,j,:]||^2\n \"\"\"\n x_norm_squared = tf.reduce_sum(tf.square(x), 2)\n y_norm_squared = tf.reduce_sum(tf.square(y), 2)\n\n # Expand so that we can broadcast.\n x_norm_squared_tile = tf.expand_dims(x_norm_squared, 2)\n y_norm_squared_tile = tf.expand_dims(y_norm_squared, 1)\n\n x_y_transpose = tf.matmul(x, y, adjoint_b=True)\n\n # squared_dists[b,i,j] = ||x_bi - y_bj||^2 =\n # x_bi'x_bi- 2x_bi'x_bj + x_bj'x_bj\n squared_dists = (\n x_norm_squared_tile - 2 * x_y_transpose + y_norm_squared_tile)\n\n return squared_dists\n\n\ndef _pairwise_squared_distance_matrix(x):\n \"\"\"Pairwise squared distance among a (batch) matrix's rows (2nd dim).\n\n This saves a bit of computation vs. 
using\n _cross_squared_distance_matrix(x,x)\n\n Args:\n x: `[batch_size, n, d]` float `Tensor`\n\n Returns:\n squared_dists: `[batch_size, n, n]` float `Tensor`, where\n squared_dists[b,i,j] = ||x[b,i,:] - x[b,j,:]||^2\n \"\"\"\n\n x_x_transpose = tf.matmul(x, x, adjoint_b=True)\n x_norm_squared = tf.linalg.diag_part(x_x_transpose)\n x_norm_squared_tile = tf.expand_dims(x_norm_squared, 2)\n\n # squared_dists[b,i,j] = ||x_bi - x_bj||^2 =\n # = x_bi'x_bi- 2x_bi'x_bj + x_bj'x_bj\n squared_dists = x_norm_squared_tile - 2 * x_x_transpose + tf.transpose(\n x_norm_squared_tile, [0, 2, 1])\n\n return squared_dists\n\n\ndef _solve_interpolation(train_points, train_values, order,\n regularization_weight):\n \"\"\"Solve for interpolation coefficients.\n\n Computes the coefficients of the polyharmonic interpolant for the\n 'training' data defined by (train_points, train_values) using the kernel\n phi.\n\n Args:\n train_points: `[b, n, d]` interpolation centers\n train_values: `[b, n, k]` function values\n order: order of the interpolation\n regularization_weight: weight to place on smoothness regularization term\n\n Returns:\n w: `[b, n, k]` weights on each interpolation center\n v: `[b, d, k]` weights on each input dimension\n Raises:\n ValueError: if d or k is not fully specified.\n \"\"\"\n\n # These dimensions are set dynamically at runtime.\n b, n, _ = tf.unstack(tf.shape(train_points), num=3)\n\n d = train_points.shape[-1]\n if d is None:\n raise ValueError('The dimensionality of the input points (d) must be '\n 'statically-inferrable.')\n\n k = train_values.shape[-1]\n if k is None:\n raise ValueError('The dimensionality of the output values (k) must be '\n 'statically-inferrable.')\n\n # First, rename variables so that the notation (c, f, w, v, A, B, etc.)\n # follows https://en.wikipedia.org/wiki/Polyharmonic_spline.\n # To account for python style guidelines we use\n # matrix_a for A and matrix_b for B.\n\n c = train_points\n f = train_values\n\n # Next, construct the linear system.\n with tf.name_scope('construct_linear_system'):\n\n matrix_a = _phi(_pairwise_squared_distance_matrix(c),\n order) # [b, n, n]\n if regularization_weight > 0:\n batch_identity_matrix = tf.expand_dims(tf.eye(n, dtype=c.dtype), 0)\n matrix_a += regularization_weight * batch_identity_matrix\n\n # Append ones to the feature values for the bias term\n # in the linear model.\n ones = tf.ones_like(c[..., :1], dtype=c.dtype)\n matrix_b = tf.concat([c, ones], 2) # [b, n, d + 1]\n\n # [b, n + d + 1, n]\n left_block = tf.concat(\n [matrix_a, tf.transpose(matrix_b, [0, 2, 1])], 1)\n\n num_b_cols = matrix_b.get_shape()[2] # d + 1\n lhs_zeros = tf.zeros([b, num_b_cols, num_b_cols], train_points.dtype)\n right_block = tf.concat([matrix_b, lhs_zeros],\n 1) # [b, n + d + 1, d + 1]\n lhs = tf.concat([left_block, right_block],\n 2) # [b, n + d + 1, n + d + 1]\n\n rhs_zeros = tf.zeros([b, d + 1, k], train_points.dtype)\n rhs = tf.concat([f, rhs_zeros], 1) # [b, n + d + 1, k]\n\n # Then, solve the linear system and unpack the results.\n with tf.name_scope('solve_linear_system'):\n w_v = tf.linalg.solve(lhs, rhs)\n w = w_v[:, :n, :]\n v = w_v[:, n:, :]\n\n return w, v\n\n\ndef _apply_interpolation(query_points, train_points, w, v, order):\n \"\"\"Apply polyharmonic interpolation model to data.\n\n Given coefficients w and v for the interpolation model, we evaluate\n interpolated function values at query_points.\n\n Args:\n query_points: `[b, m, d]` x values to evaluate the interpolation at\n train_points: `[b, n, d]` x values that 
act as the interpolation centers\n ( the c variables in the wikipedia article)\n w: `[b, n, k]` weights on each interpolation center\n v: `[b, d, k]` weights on each input dimension\n order: order of the interpolation\n\n Returns:\n Polyharmonic interpolation evaluated at points defined in query_points.\n \"\"\"\n\n # First, compute the contribution from the rbf term.\n pairwise_dists = _cross_squared_distance_matrix(query_points, train_points)\n phi_pairwise_dists = _phi(pairwise_dists, order)\n\n rbf_term = tf.matmul(phi_pairwise_dists, w)\n\n # Then, compute the contribution from the linear term.\n # Pad query_points with ones, for the bias term in the linear model.\n query_points_pad = tf.concat([\n query_points,\n tf.ones_like(query_points[..., :1], train_points.dtype)\n ], 2)\n linear_term = tf.matmul(query_points_pad, v)\n\n return rbf_term + linear_term\n\n\ndef _phi(r, order):\n \"\"\"Coordinate-wise nonlinearity used to define the order of the\n interpolation.\n\n See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.\n\n Args:\n r: input op\n order: interpolation order\n\n Returns:\n phi_k evaluated coordinate-wise on r, for k = r\n \"\"\"\n\n # using EPSILON prevents log(0), sqrt0), etc.\n # sqrt(0) is well-defined, but its gradient is not\n with tf.name_scope('phi'):\n if order == 1:\n r = tf.maximum(r, EPSILON)\n r = tf.sqrt(r)\n return r\n elif order == 2:\n return 0.5 * r * tf.math.log(tf.maximum(r, EPSILON))\n elif order == 4:\n return 0.5 * tf.square(r) * tf.math.log(tf.maximum(r, EPSILON))\n elif order % 2 == 0:\n r = tf.maximum(r, EPSILON)\n return 0.5 * tf.pow(r, 0.5 * order) * tf.math.log(r)\n else:\n r = tf.maximum(r, EPSILON)\n return tf.pow(r, 0.5 * order)\n\n\ndef interpolate_spline(train_points,\n train_values,\n query_points,\n order,\n regularization_weight=0.0,\n name='interpolate_spline'):\n r\"\"\"Interpolate signal using polyharmonic interpolation.\n\n The interpolant has the form\n $$f(x) = \\sum_{i = 1}^n w_i \\phi(||x - c_i||) + v^T x + b.$$\n\n This is a sum of two terms: (1) a weighted sum of radial basis function\n (RBF) terms, with the centers \\\\(c_1, ... c_n\\\\), and (2) a linear term\n with a bias. The \\\\(c_i\\\\) vectors are 'training' points.\n In the code, b is absorbed into v\n by appending 1 as a final dimension to x. The coefficients w and v are\n estimated such that the interpolant exactly fits the value of the function\n at the \\\\(c_i\\\\) points, the vector w is orthogonal to each \\\\(c_i\\\\),\n and the vector w sums to 0. With these constraints, the coefficients\n can be obtained by solving a linear system.\n\n \\\\(\\phi\\\\) is an RBF, parametrized by an interpolation\n order. Using order=2 produces the well-known thin-plate spline.\n\n We also provide the option to perform regularized interpolation. Here, the\n interpolant is selected to trade off between the squared loss on the\n training data and a certain measure of its curvature\n ([details](https://en.wikipedia.org/wiki/Polyharmonic_spline)).\n Using a regularization weight greater than zero has the effect that the\n interpolant will no longer exactly fit the training data. However, it may\n be less vulnerable to overfitting, particularly for high-order\n interpolation.\n\n Note the interpolation procedure is differentiable with respect to all\n inputs besides the order parameter.\n\n We support dynamically-shaped inputs, where batch_size, n, and m are None\n at graph construction time. 
However, d and k must be known.\n\n Args:\n train_points: `[batch_size, n, d]` float `Tensor` of n d-dimensional\n locations. These do not need to be regularly-spaced.\n train_values: `[batch_size, n, k]` float `Tensor` of n c-dimensional\n values evaluated at train_points.\n query_points: `[batch_size, m, d]` `Tensor` of m d-dimensional locations\n where we will output the interpolant's values.\n order: order of the interpolation. Common values are 1 for\n \\\\(\\phi(r) = r\\\\), 2 for \\\\(\\phi(r) = r^2 * log(r)\\\\)\n (thin-plate spline), or 3 for \\\\(\\phi(r) = r^3\\\\).\n regularization_weight: weight placed on the regularization term.\n This will depend substantially on the problem, and it should always be\n tuned. For many problems, it is reasonable to use no regularization.\n If using a non-zero value, we recommend a small value like 0.001.\n name: name prefix for ops created by this function\n\n Returns:\n `[b, m, k]` float `Tensor` of query values. We use train_points and\n train_values to perform polyharmonic interpolation. The query values are\n the values of the interpolant evaluated at the locations specified in\n query_points.\n \"\"\"\n with tf.name_scope(name or \"interpolate_spline\"):\n train_points = tf.convert_to_tensor(train_points)\n train_values = tf.convert_to_tensor(train_values)\n query_points = tf.convert_to_tensor(query_points)\n\n # First, fit the spline to the observed data.\n with tf.name_scope('solve'):\n w, v = _solve_interpolation(train_points, train_values, order,\n regularization_weight)\n\n # Then, evaluate the spline at the query locations.\n with tf.name_scope('predict'):\n query_values = _apply_interpolation(query_points, train_points, w,\n v, order)\n\n return query_values\n"
] | [
[
"tensorflow.zeros",
"tensorflow.math.log",
"tensorflow.shape",
"tensorflow.ones_like",
"tensorflow.sqrt",
"tensorflow.expand_dims",
"tensorflow.eye",
"tensorflow.pow",
"tensorflow.matmul",
"tensorflow.name_scope",
"tensorflow.linalg.solve",
"tensorflow.square",
"tensorflow.linalg.diag_part",
"tensorflow.concat",
"tensorflow.convert_to_tensor",
"tensorflow.transpose",
"tensorflow.maximum"
]
] |
awesome-archive/pycorrector | [
"022da83ab794d9f9ddc40caef67b0578e7e3f513"
] | [
"pycorrector/seq2seq/infer.py"
] | [
"# -*- coding: utf-8 -*-\n# Author: XuMing <[email protected]>\n# Brief: \n\nimport numpy as np\nimport tensorflow as tf\nfrom keras.models import load_model\n\nfrom pycorrector.seq2seq import cged_config as config\nfrom pycorrector.seq2seq.corpus_reader import CGEDReader, load_word_dict\nfrom pycorrector.seq2seq.reader import EOS_TOKEN, GO_TOKEN\nfrom pycorrector.utils.io_utils import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass Infer(object):\n def __init__(self, config=None):\n train_path = config.train_path\n encoder_model_path = config.encoder_model_path\n decoder_model_path = config.decoder_model_path\n save_input_token_path = config.input_vocab_path\n save_target_token_path = config.target_vocab_path\n\n # load dict\n self.input_token_index = load_word_dict(save_input_token_path)\n self.target_token_index = load_word_dict(save_target_token_path)\n\n data_reader = CGEDReader(train_path)\n input_texts, target_texts = data_reader.build_dataset(train_path)\n self.max_input_texts_len = max([len(text) for text in input_texts])\n self.max_target_texts_len = max([len(text) for text in target_texts])\n logger.info(\"Data loaded.\")\n\n # load model\n self.encoder_model = load_model(encoder_model_path)\n self.decoder_model = load_model(decoder_model_path)\n logger.info(\"Loaded seq2seq model.\")\n self.graph = tf.get_default_graph()\n\n def _decode_sequence(self, encoder_input_data):\n decoded_sentence = ''\n with self.graph.as_default():\n # Encode the input as state vectors.\n states_value = self.encoder_model.predict(encoder_input_data)\n\n # Generate empty target sequence of length 1.\n target_seq = np.zeros((1, 1, len(self.target_token_index)))\n # Populate the first character of target sequence with the start character.\n # first_char = encoder_input_data[0]\n target_seq[0, 0, self.target_token_index[GO_TOKEN]] = 1.0\n\n reverse_target_char_index = dict(\n (i, char) for char, i in self.target_token_index.items())\n\n for _ in range(self.max_target_texts_len):\n output_tokens, h, c = self.decoder_model.predict([target_seq] + states_value)\n # Sample a token\n sampled_token_index = np.argmax(output_tokens[0, -1, :])\n sampled_char = reverse_target_char_index[sampled_token_index]\n # Exit condition: either hit max length\n # or find stop character.\n if sampled_char == EOS_TOKEN:\n break\n decoded_sentence += sampled_char\n # Update the target sequence (of length 1).\n target_seq = np.zeros((1, 1, len(self.target_token_index)))\n target_seq[0, 0, sampled_token_index] = 1.0\n # Update states\n states_value = [h, c]\n return decoded_sentence\n\n def infer(self, input_text):\n encoder_input_data = np.zeros((1, self.max_input_texts_len, len(self.input_token_index)),\n dtype='float32')\n # one hot representation\n for i, char in enumerate(input_text):\n if char in self.input_token_index:\n encoder_input_data[0, i, self.input_token_index[char]] = 1.0\n # Take one sequence decoding.\n decoded_sentence = self._decode_sequence(encoder_input_data)\n logger.info('Input sentence:%s' % input_text)\n logger.info('Decoded sentence:%s' % decoded_sentence)\n\n\nif __name__ == \"__main__\":\n inference = Infer(config=config)\n inputs = [\n '由我起开始做。',\n '没有解决这个问题,',\n '由我起开始做。',\n '由我起开始做',\n '不能人类实现更美好的将来。',\n '这几年前时间,',\n '歌曲使人的感到快乐,',\n ]\n for i in inputs:\n inference.infer(i)\n\n while True:\n input_str = input('input your string:')\n inference.infer(input_str)\n"
] | [
[
"numpy.argmax",
"tensorflow.get_default_graph"
]
] |
rahulvigneswaran/TailCalibX | [
"0ed18cc8903715c0e31934c54226a53b1bbfc198"
] | [
"libs/models/DotProductClassifier.py"
] | [
"# Imports\nimport torch.nn as nn\nfrom os import path\nimport torch\nimport torch.nn.functional as F\n\nclass DotProduct_Classifier(nn.Module):\n def __init__(self, num_classes=1000, feat_dim=2048, *args):\n super(DotProduct_Classifier, self).__init__()\n self.fc = nn.Linear(feat_dim, num_classes)\n\n def forward(self, x, *args):\n x = self.fc(x)\n return x\n\n\ndef create_model(feat_dim, num_classes=1000, pretrain=False, pretrain_dir=None, *args):\n \"\"\"Initialize the model\n\n Args:\n feat_dim (int): output dimension of the previous feature extractor\n num_classes (int, optional): Number of classes. Defaults to 1000.\n\n Returns:\n Class: Model\n \"\"\"\n print(\"Loading Dot Product Classifier.\")\n clf = DotProduct_Classifier(num_classes, feat_dim)\n\n if pretrain:\n if path.exists(pretrain_dir):\n print(\"===> Load Pretrain Initialization for DotProductClassfier\")\n weights = torch.load(pretrain_dir)[\"state_dict_best\"][\"classifier\"]\n\n weights = {\n k: weights[\"module.\" + k]\n if \"module.\" + k in weights\n else clf.state_dict()[k]\n for k in clf.state_dict()\n }\n clf.load_state_dict(weights)\n else: \n raise Exception(f\"Pretrain path doesn't exist!!--{pretrain_dir}\")\n else:\n print(\"===> Train classifier from the scratch\")\n\n return clf\n"
] | [
[
"torch.nn.Linear",
"torch.load"
]
] |
Pavivenkatesan/TicTacToe-RL-MM- | [
"fbaab6bb9574b82ae0d79c818ba74d049375bfd4"
] | [
"testing.py"
] | [
"import numpy as np\nfrom math import inf as infinity\nfrom itertools import product\nfrom collections import defaultdict\nimport random\nimport time\n\n# Initializing the Tic-Tac-Toe environment\n# Three rows-Three columns, creating an empty list of three empty lists\nstate_space = [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']]\n# No. of players = 2 : X & O\nplayers = ['X', 'O']\n\n\n# Defining the play state_value, player and the cell number\ndef play(sv, each_player, cell):\n if sv[int((cell - 1) / 3)][(cell - 1) % 3] is ' ':\n sv[int((cell - 1) / 3)][(cell - 1) % 3] = each_player\n else:\n cell = int(input(\" Choose again, Cell is not empty: \"))\n play(sv, each_player, cell)\n\n\n# Defining new state function: which traverse over rows and columns and returns new state\ndef new(state):\n ns = [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']]\n for i in range(3):\n for j in range(3):\n ns[i][j] = state[i][j]\n return ns\n\n\n# Determining the current state value and determining the win\ndef cur_state(state_space):\n if (state_space[0][0] == state_space[0][1] and state_space[0][1] == state_space[0][2] and state_space[0][\n 0] is not ' '):\n return state_space[0][0], \"Done\"\n if (state_space[1][0] == state_space[1][1] and state_space[1][1] == state_space[1][2] and state_space[1][\n 0] is not ' '):\n return state_space[1][0], \"Done\"\n if (state_space[2][0] == state_space[2][1] and state_space[2][1] == state_space[2][2] and state_space[2][\n 0] is not ' '):\n return state_space[2][0], \"Done\"\n\n if (state_space[0][0] == state_space[1][0] and state_space[1][0] == state_space[2][0] and state_space[0][\n 0] is not ' '):\n return state_space[0][0], \"Done\"\n if (state_space[0][1] == state_space[1][1] and state_space[1][1] == state_space[2][1] and state_space[0][\n 1] is not ' '):\n return state_space[0][1], \"Done\"\n if (state_space[0][2] == state_space[1][2] and state_space[1][2] == state_space[2][2] and state_space[0][\n 2] is not ' '):\n return state_space[0][2], \"Done\"\n\n if (state_space[0][0] == state_space[1][1] and state_space[1][1] == state_space[2][2] and state_space[0][\n 0] is not ' '):\n return state_space[1][1], \"Done\"\n if (state_space[2][0] == state_space[1][1] and state_space[1][1] == state_space[0][2] and state_space[2][\n 0] is not ' '):\n return state_space[1][1], \"Done\"\n # if none of the above is true there must be a draw\n draw = 0\n for i in range(3):\n for j in range(3):\n if state_space[i][j] is ' ':\n draw = 1\n if draw is 0:\n return None, \"Draw\"\n\n return None, \"Not Done\"\n\n\n# Defining the outline of the Tic-Tac Toe for the state_space or environment\ndef outline(state_space):\n print('----------------')\n print('| ' + str(state_space[0][0]) + ' || ' + str(state_space[0][1]) + ' || ' + str(state_space[0][2]) + ' |')\n print('----------------')\n print('| ' + str(state_space[1][0]) + ' || ' + str(state_space[1][1]) + ' || ' + str(state_space[1][2]) + ' |')\n print('----------------')\n print('| ' + str(state_space[2][0]) + ' || ' + str(state_space[2][1]) + ' || ' + str(state_space[2][2]) + ' |')\n print('----------------')\n\n\n# Initializing state values\neach_player = ['X', 'O', ' ']\nstates_dictionary = {}\n# listing all possible states\nstates = [[list(i[0:3]), list(i[3:6]), list(i[6:10])] for i in product(each_player, repeat=9)]\n# getting Total number of states\nTotal_states = len(states)\nprint(\"Total number of states = \", Total_states)\n# Total number of moves/ actions in Tic-Tac-Toe is 9\nTotal_moves = 9\nprint(\"Total number of 
actions = \", Total_moves)\n# Intializing agent intial value as 0\nsv_O = np.full(Total_states, 0.0)\n\n# Defining the state values for agent O\nfor i in range(Total_states):\n states_dictionary[i] = states[i]\n won_by, _ = cur_state(states_dictionary[i])\n if won_by == 'X':\n sv_O[i] = -1\n elif won_by == 'O':\n sv_O[i] = 1\n\n\n# Using Update rule of Temporal difference to update the state value of 'O'\n# V(s) <- V(s) + alpha * ((V(s^f) - V(s))\n# current_state_value <- current_state_value + learning_rate * (new_state_value - current_state_value)\ndef update_O(alpha, csv, nsv):\n # alpha: learning rate, csv: current state value, nsv: next state value\n sv_O[csv] = sv_O[csv] + alpha * sv_O[nsv]\n\n\n# Testing our Tic-Tac-Toe agent 'O' vs. Human\n# Temporal difference: A RL Algo.\ndef TD(sv, each_player):\n actions = []\n curr_state_values = []\n empty_cells = []\n for i in range(3):\n for j in range(3):\n if sv[i][j] is ' ':\n empty_cells.append(i * 3 + (j + 1))\n\n for empty_cell in empty_cells:\n actions.append(empty_cell)\n new_state = new(sv)\n play(new_state, each_player, empty_cell)\n next_sid = list(states_dictionary.keys())[list(states_dictionary.values()).index(new_state)]\n curr_state_values.append(sv_O[next_sid])\n\n print('Possible Action moves = ' + str(actions))\n print('Action Move values = ' + str(curr_state_values))\n best_move_id = np.argmax(curr_state_values)\n best_move = actions[best_move_id]\n return best_move\n\n\n# Now Playing\n# Loading policy or the trained state values\nsv_O = np.loadtxt('trained_O.txt', dtype=np.float64)\n\nplay_more = \"Y\"\nwhile play_more == 'Y' or play_more == 'y':\n state_space = [[' ', ' ', ' '],[' ', ' ', ' '],[' ', ' ', ' ']]\n curr_state = \"Not Done\"\n print(\"\\n Let's start New Game!\")\n outline(state_space)\n input_choice = input(\"Choose which player to go first - X (Human) or O(RL Agent): \")\n won_by = None\n\n if input_choice == 'X' or input_choice == 'x':\n cid = 0\n else:\n cid = 1\n\n while curr_state == \"Not Done\":\n csv = list(states_dictionary.keys())[list(states_dictionary.values()).index(state_space)]\n if cid == 0:\n print(\"Now Human's turn:\")\n cell_select = int(input(\"It's your turn! Choose a block to place X (1 to 9): \"))\n play(state_space, players[cid], cell_select)\n\n else:\n cell_select = TD(state_space,players[cid])\n play(state_space,players[cid], cell_select)\n print(\"Agent O placed at\" + str(cell_select))\n\n outline(state_space)\n won_by, curr_state = cur_state(state_space)\n if won_by is not None:\n print(str(won_by) + \" Won Won Won!\")\n elif curr_state is \"Draw\":\n print(\"Draw Draw Draw!!!\")\n else:\n cid = (cid + 1) % 2\n\n play_more = input('Wanna Play more? Hit Y/N')\nprint('See you again! :D')\n"
] | [
[
"numpy.full",
"numpy.argmax",
"numpy.loadtxt"
]
] |
Deech08/modspectra | [
"4af177418f9ac3e1ff30bf99968251ac143a96bc"
] | [
"modspectra/tests/test_spectrum_creation.py"
] | [
"import pytest\nfrom numpy.random import randn\nfrom numpy.random import random\nimport numpy as np\n\ndef test_non_detection():\n from ..cube import EmissionCube\n from astropy.coordinates import SkyCoord\n import astropy.units as u\n '''\n Test that an anti-center pointing returns zero emission\n '''\n l = 180. + randn()*130.\n b = 0. + randn()*20.\n while (l > 340.) | (l < 20.): # Ensure actual non-detection\n l = 180. + randn()*130.\n c = SkyCoord(l = l*u.deg, b = b*u.deg, frame = 'galactic', galcen_distance = 8.127*u.kpc)\n spec = EmissionCube.create_DK19_spectrum(c, 0.5 * u.deg, redden = False)\n assert np.allclose(spec.value, np.zeros_like(spec.value))\n\ndef test_coordinate_error():\n from ..cube import EmissionCube\n import astropy.units as u\n '''\n Ensure that a SkyCoord Object is required\n '''\n l = 0. + randn()*5.\n b = 0. + randn()*3.\n try:\n spec = EmissionCube.create_DK19_spectrum((l,b), 0.5 * u.deg, redden = False)\n except TypeError:\n assert True\n else:\n assert False\n\ndef test_galcen_distance():\n from ..cube import EmissionCube\n import astropy.units as u\n from astropy.coordinates import SkyCoord\n '''\n Ensure that a default galcen_distnace is adopted\n '''\n l = 0. + randn()*5.\n b = 0. + randn()*3.\n c = SkyCoord(l = l*u.deg, b = b*u.deg, frame = 'galactic')\n c2 = SkyCoord(l = l*u.deg, b = b*u.deg, frame = 'galactic', galcen_distance = 8.127*u.kpc)\n spec = EmissionCube.create_DK19_spectrum(c, 0.5 * u.deg, redden = False)\n spec2 = EmissionCube.create_DK19_spectrum(c2, 0.5 * u.deg, redden = False)\n assert np.allclose(spec.value, spec2.value)\n\ndef test_radius_degrees():\n from ..cube import EmissionCube\n import astropy.units as u\n from astropy.coordinates import SkyCoord\n '''\n Ensure that a default units for radius are in\n '''\n l = 0. + randn()*5.\n b = 0. + randn()*3.\n c = SkyCoord(l = l*u.deg, b = b*u.deg, frame = 'galactic', galcen_distance = 8.127*u.kpc)\n r1 = np.abs( randn()*1000.) * u.arcmin\n r2 = r1.to(u.deg).value\n spec = EmissionCube.create_DK19_spectrum(c, r1, redden = False)\n spec2 = EmissionCube.create_DK19_spectrum(c, r2, redden = False)\n assert np.allclose(spec.value, spec2.value)\n\n\n \n \n"
] | [
[
"numpy.allclose",
"numpy.random.randn",
"numpy.zeros_like"
]
] |