repo_name | hexsha | file_path | code | apis | possible_versions
---|---|---|---|---|---|
amri369/skin-lesion-segmentation | [
"f777393cfefd9344607ac580de2b5896b60926da"
] | [
"utils/eval.py"
] | [
"import torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\n\nfrom dice_loss import dice_coeff\n\n\ndef eval_net(net, loader, device):\n \"\"\"Evaluation without the densecrf with the dice coefficient\"\"\"\n net.eval()\n mask_type = torch.float32 if net.n_classes == 1 else torch.long\n n_val = len(loader) # the number of batch\n tot = 0\n\n with tqdm(total=n_val, desc='Validation round', unit='batch', leave=False) as pbar:\n for batch in loader:\n imgs, true_masks = batch['image'], batch['mask']\n imgs = imgs.to(device=device, dtype=torch.float32)\n true_masks = true_masks.to(device=device, dtype=mask_type)\n\n with torch.no_grad():\n mask_pred = net(imgs)\n\n if net.n_classes > 1:\n tot += F.cross_entropy(mask_pred, true_masks).item()\n else:\n pred = torch.sigmoid(mask_pred)\n pred = (pred > 0.5).float()\n tot += dice_coeff(pred, true_masks).item()\n pbar.update()\n\n net.train()\n return tot / n_val"
] | [
[
"torch.sigmoid",
"torch.nn.functional.cross_entropy",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sixy6e/geospatial-h5 | [
"09fadd92f4a054d27b9428eda9501c24e723952c"
] | [
"examples/attribute_table_example.py"
] | [
"#!/usr/bin/env python\n\nimport numpy\nfrom scipy import ndimage\nimport pandas\nfrom geoh5 import kea\nfrom geoh5.kea import common as kc\n# https://github.com/sixy6e/image-processing\nfrom image_processing.segmentation import Segments\n\n\"\"\"\nOnce completed open the file in tuiview to see the colourised segments\nand the raster attribute table.\n\"\"\"\n\ndef main():\n \"\"\"\n Create a segmented array.\n Compute basic stats for each segment:\n (min, max, mean, standard deviation, total, area)\n Write the segmented image and the raster attribute table.\n \"\"\"\n\n # data dimensions\n dims = (1000, 1000)\n \n # create some random data and segment via value > 5000\n seg_data = numpy.random.randint(0, 10001, dims).astype('uint32')\n seg_data, nlabels = ndimage.label(seg_data > 5000)\n \n # create some random data to calculate stats against\n data = numpy.random.ranf(dims)\n \n # create a segments class object\n seg = Segments(seg_data, include_zero=True)\n \n # retrieve basic stats (min, max, mean, standard deviation, total, area)\n stats_table = seg.basic_statistics(data, dataframe=True)\n stats_table.set_index(\"Segment_IDs\", inplace=True)\n \n # join via segment id, specifying 'outer' will account for empty segments\n df = pandas.DataFrame({\"Histogram\": seg.histogram})\n stats_table = df.join(stats_table, how='outer')\n nrows = stats_table.shape[0]\n\n # assign random colours to each segment\n stats_table.insert(1, \"Red\", numpy.random.randint(0, 256, (nrows)))\n stats_table.insert(2, \"Green\", numpy.random.randint(0, 256, (nrows)))\n stats_table.insert(3, \"Blue\", numpy.random.randint(0, 256, (nrows)))\n stats_table.insert(4, \"Alpha\", 255)\n\n # define 1 output band and add another band later\n kwargs = {'width': dims[1],\n 'height': dims[0],\n 'count': 1,\n 'compression': 4,\n 'chunks': (100, 100),\n 'blocksize': 100,\n 'dtype': seg_data.dtype.name}\n\n with kea.open('attribute-table-example.kea', 'w', **kwargs) as src:\n src.write(seg_data, 1)\n\n # define the layer type as thematic (labelled, classified etc)\n src.write_layer_type(1, kc.LayerType.thematic)\n\n # write the stats table as an attribute table\n usage = {\"Red\": \"Red\",\n \"Green\": \"Green\",\n \"Blue\": \"Blue\",\n \"Alpha\": \"Alpha\",\n \"Histogram\": \"PixelCount\"}\n src.write_rat(stats_table, 1, usage=usage)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"scipy.ndimage.label",
"numpy.random.ranf",
"pandas.DataFrame",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
arung-northwestern/pyIsoP | [
"aed84adad302e0e576cdf089cf03030d9a4b2855"
] | [
"pyIsoP/histo.py"
] | [
"class histo:\n \n #*####### # # ####### ###### ##### # # # # ### ##### ####### ####### ##### ###### # # #\n #*# ## # # # # # # # # # # # # # # # # # # # # # # ## ##\n #*# # # # # # # # # # # # # # # # # # # # # # # # # #\n #*##### # # # ##### ###### # #### # ####### # ##### # # # # #### ###### # # # # #\n #*# # # # # # # # # # # # # # # # # # # # # ####### # #\n #*# # ## # # # # # # # # # # # # # # # # # # # # # #\n #*####### # # ####### # # ##### # # # ### ##### # ####### ##### # # # # # #\n \"\"\"\n * Functions to calculate or read the energy histogram of amaterial\n \n \"\"\"\n\n\n def __init__(self, nbins=50, E_max=1, normed_flag=True):\n \"\"\" \n Initializes the histogram object to store the energy histogram, unit of [K] for the energy.\n\n :type number: int\n :param number: number of bins\n \n :type E: array of floats\n :param E: Energy of each bin in the histogram-- bincenters\n \n :type RhoE: array of floats\n :param RhoE: Normalized energy density, (area under the curve =1)\n\n :type E_max: float\n :param E: Maximum energy where the histogram is cut off. in units of [K]\n * DEFAULT: 10 [K], doesn't change the isotherm from prior testing\n \n\n :raises:\n \n :rtype: Instance of the histogram class\n \n \"\"\"\n\n self.nbins = nbins\n self.E = []\n self.RhoE = []\n self.E_max = E_max\n self.normed_flag = normed_flag\n self.name = ''\n\n # * Calculate histogram from PyIsoP grid object\n def grid2histo(grid_obj, hist_obj):\n \"\"\" \n Calculate histogram from PyIsoP grid object \n\n :type grid_obj: An instance of the grid3D class\n :param grid_obj: Contains energy info on a 3D grid\n \n :type hist_obj: An instance of the histo class\n :param hist_obj: Will be modified and overwritten with energy and density of sites info\n\n :raises:\n \n :rtype: Modified histogram object with E and RhoE.\n \n \"\"\"\n import numpy as np\n\n hist_obj.name = grid_obj.file\n e_vals = np.reshape(grid_obj.pot, (grid_obj.nx*grid_obj.ny*grid_obj.nz, 1), order='C')\n e_vals = e_vals/grid_obj.Temperature # Reduced units for energy\n e_vals = e_vals[~np.isnan(e_vals)]\n bins1 = np.linspace(min(e_vals)-0.5, hist_obj.E_max/grid_obj.Temperature , hist_obj.nbins+1)\n # bins1 = np.reshape(bins1, (1,hist_obj.nbins+1))\n hist_obj.RhoE, binedges1 = np.histogram(e_vals, bins=bins1, density=hist_obj.normed_flag)\n bincenters = 0.5 * (binedges1[1:] + binedges1[:-1]) # Bincenters\n hist_obj.E = bincenters\n return hist_obj\n\n # * Calculate histogram from PyIsoP grid object\n def gridtotal2histo(grid_obj, hist_obj):\n \"\"\" \n Calculate histogram from PyIsoP grid object using the total energy (bead + sphere sampling + disc sampling)\n\n :type grid_obj: An instance of the grid3D class\n :param grid_obj: Contains energy info on a 3D grid\n \n :type hist_obj: An instance of the histo class\n :param hist_obj: Will be modified and overwritten with energy and density of sites info\n\n :raises:\n \n :rtype: Modified histogram object with E and RhoE.\n \n \"\"\"\n import numpy as np\n \n hist_obj.name = grid_obj.file\n \n e_vals = np.reshape(grid_obj.pot_total, (grid_obj.N_grid_total, 1), order='C')\n e_vals = e_vals/grid_obj.Temperature # Reduced units for energy\n e_vals = e_vals[~np.isnan(e_vals)]\n bins1 = np.linspace(min(e_vals)-0.5, hist_obj.E_max/grid_obj.Temperature , hist_obj.nbins+1)\n # bins1 = np.reshape(bins1, (1,hist_obj.nbins+1))\n # e_vals = e_vals[~np.isnan(e_vals)]\n hist_obj.RhoE, binedges1 = np.histogram(e_vals, bins=bins1, density=hist_obj.normed_flag)\n bincenters = 0.5 * (binedges1[1:] + binedges1[:-1]) 
# Bincenters\n hist_obj.E = bincenters\n return hist_obj\n\n\n # * Calculate histogtram from the RASPA generated grid file, Ben Bucior's version\n def raspa2histo(grid_filename, hist_obj):\n \"\"\" \n Calculate histogtram from the RASPA generated grid file, Ben Bucior's version\n find it here: \n\n :type grid_file:str\n :param grid_file: Path to the grid file generated by RASPA. Grid file format x y z energy dE/dx dE/dy dE/dz. \n\n :type hist_obj: instance of the histo class. \n :param hist_obj: contains all the info regarding the energy distribution of sites for a given material. \n\n :raises: One has to initialize the histogram object before calling this function\n shouldn't raise any error if the .grid file is from the RASPA version published along with PyIsoP \n which is x y z E dEx dEy dEz (last three optional), no header lines.\n\n :rtype: histogram object with the energies and their densities updated\n\n \"\"\"\n import numpy as np\n import pandas as pd\n\n grid = pd.read_csv(grid_filename, header=None, delim_whitespace=True) # Read the energy data\n e_vals = pd.to_numeric(grid[3][grid[3].as_matrix() != '?'])\n e_vals = e_vals/grid_obj.Temperature # Reduced units for energy\n e_vals = e_vals[~np.isnan(e_vals)]\n bins1 = np.linspace(min(e_vals)-0.5, hist_obj.E_max/grid_obj.Temperature , hist_obj.nbins+1)\n hist_obj.RhoE, binedges1 = np.histogram(e_vals, bins=bins1, density=hist_obj.normed_flag)\n bincenters = 0.5 * (binedges1[1:] + binedges1[:-1]) # Bincenters\n hist_obj.E = bincenters\n\n return hist_obj\n\n # * Read the histogram in from a file with no header and two columns\n def file2histo(histo_filename, hist_obj):\n \"\"\" \n\n Read the histogram in from a file with no header and two columns \n\n :type histo_filename: str \n :param histo_filename: path to the histogram text file with two columns, keep the energy dimensionless to avoid confusion.\n \n :type hist_obj: instance of the histo class\n :param hist_obj: contains all the info regarding the energy histogram of sites for a given material \n \n :raises: One has to initialize the histogram object before calling this function.\n pandas could throw up if the file is not formatted correctly. Refer examples section in the documentation.\n \n :rtype: histogram object with the energies and their densities updated\n\n \"\"\"\n\n import pandas as pd\n import numpy as np\n\n histo_temp = pd.read_csv(histo_filename, header=None, delim_whitespace=True) # Read the energy data\n hist_obj.E = np.array(histo_temp[0])\n hist_obj.RhoE = np.array(histo_temp[1])\n hist_obj.name = histo_filename\n hist_obj.nbins = len(hist_obj.E) # Number of bins is dictated by the row in the histogram text file\n\n return hist_obj\n\n # * Convert cube file to histogram, .cube file maybe generated using Prof. Cory Simon's code.\n def cube2histo(cube_filename, hist_obj):\n \"\"\" \n Convert cube file to histogram, .cube file maybe generated using Prof. Cory Simon's code in Julia.\n\n :type cube_filename: str\n :param cube_filename: path to the cube file generated by Prof. Cory Simon's code\n\n :type hist_obj: instance of the histo class\n :param hist_obj: contains all the info regarding the energy histogram of sites for a given material \n\n :raises: One has to initialize the histogram object before calling this function.\n pandas could throw up if the file is not formatted correctly. 
Refer documentation page\n\n :rtype: instance of the histo class.\n\n \"\"\"\n import pandas as pd\n import numpy as np\n from ase.io import read, write\n from ase.io.cube import read_cube_data\n\n data, atoms = read_cube_data(cube_filename)\n e_vals = np.reshape(data, (data.size, 1), order='C')\n e_vals = e_vals/grid_obj.Temperature # Reduced units for energy\n e_vals = e_vals[~np.isnan(e_vals)]\n bins1 = np.linspace(min(e_vals)-0.5, hist_obj.E_max/grid_obj.Temperature , hist_obj.nbins+1)\n hist_obj.RhoE, binedges1 = np.histogram(e_vals, bins=bins1, density=hist_obj.normed_flag)\n bincenters = 0.5 * (binedges1[1:] + binedges1[:-1]) # Bincenters\n hist_obj.E = bincenters\n\n return hist_obj\n"
] | [
[
"pandas.read_csv",
"numpy.reshape",
"numpy.isnan",
"numpy.array",
"numpy.histogram"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
tmastny/siuba | [
"7a234bc6d03b7ad3ba6054c8899fd27ccb7f05aa"
] | [
"siuba/experimental/pd_groups/groupby.py"
] | [
"from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy\nimport inspect\nfrom pandas.core import algorithms\nimport pandas as pd\n\n\n# Custom SeriesGroupBy class ==================================================\n\nclass GroupByAgg(SeriesGroupBy):\n def __init__(self, *args, orig_grouper, orig_obj, should_cast, **kwargs):\n self._orig_grouper = orig_grouper\n self._orig_obj = orig_obj\n self._should_cast = should_cast\n super().__init__(*args, **kwargs)\n \n def _broadcast_agg_result(self):\n return broadcast_agg_result(\n self._orig_grouper, \n self.obj,\n self._orig_obj,\n cast = self._should_cast\n )\n \n @classmethod\n def from_result(cls, result, groupby):\n if not isinstance(result, pd.Series):\n raise TypeError(\"requires pandas Series\")\n\n # Series.groupby is hard-coded to produce a SeriesGroupBy,\n # but it's signature is very large, so use inspect to bind on it.\n sig = inspect.signature(result.groupby)\n bound = sig.bind(by = result.index)\n \n orig_grouper = groupby._orig_grouper if isinstance(groupby, cls) else groupby.grouper\n orig_obj = groupby._orig_obj if isinstance(groupby, cls) else groupby.obj\n should_cast = False\n \n return cls(\n result,\n *bound.args, **bound.kwargs,\n orig_grouper = orig_grouper,\n orig_obj = orig_obj,\n should_cast = should_cast\n )\n\ndef broadcast_agg_result(grouper, result, obj, cast = False):\n \"\"\"\n fast version of transform, only applicable to\n builtin/cythonizable functions\n \"\"\"\n ids, _, ngroup = grouper.group_info\n out = algorithms.take_1d(result._values, ids)\n \n # TODO: consequence of skipping this step? A cast is already done\n # once when aggregating....\n if cast:\n out = try_cast(out, obj)\n return pd.Series(out, index=obj.index, name=obj.name)\n\n\n# Utils =======================================================================\n\ndef all_isinstance(cls, *args):\n return all(isinstance(x, cls) for x in args)\n\ndef _regroup(res, groupby):\n if isinstance(groupby, GroupByAgg):\n # need to manually a constructor, since Series classes are hardcoded\n # all over the pandas library :/ :/ :/\n return groupby.from_result(res, groupby)\n elif isinstance(groupby, SeriesGroupBy):\n return res.groupby(groupby.grouper)\n \n raise ValueError(\"Unknown group by class: %s\"% type(groupby))\n\n\n# Broadcasting Groupby elements -----------------------------------------------\n\ndef grouper_match(grp1, grp2):\n if not isinstance(grp2, SeriesGroupBy):\n raise TypeError(\"grp2 must be a SeriesGroupBy\")\n\n if grp1._orig_grouper is not grp2.grouper:\n raise ValueError(\"groups must match\")\n\n return grp1._broadcast_agg_result(), grp2.obj, grp2\n \n\ndef broadcast_group_elements(x, y):\n \"\"\"Returns 3-tuple of same-length x and y data, plus a reference group by object.\n\n Note:\n * Raises error if x and y are not compatible group by objects.\n * Will broadcast a GroupByAgg, to ensure same length as other data.\n \"\"\"\n if all_isinstance(GroupByAgg, x, y) and x._orig_grouper is y._orig_grouper:\n return x.obj, y.obj, x\n \n elif isinstance(x, GroupByAgg):\n return grouper_match(x, y)\n \n elif isinstance(y, GroupByAgg):\n res_y, res_x, grp = grouper_match(y, x)\n return res_x, res_y, grp\n \n elif all_isinstance(SeriesGroupBy, x, y) and x.grouper is y.grouper:\n return x.obj, y.obj, x\n\n raise ValueError(\"need groupby objects with matching groupers\")\n\n"
] | [
[
"pandas.core.algorithms.take_1d",
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"1.1",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
yingCMU/deep-reinforcement-learning | [
"d1a9b6c5d4d310e8fdd75ff2d39003a12a1343b2"
] | [
"dqn/exercise/model.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass QNetwork(nn.Module):\n \"\"\"Actor (Policy) Model.\"\"\"\n\n def __init__(self, state_size, action_size, seed):\n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n seed (int): Random seed\n \"\"\"\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n \"*** YOUR CODE HERE ***\"\n hidden_1 = 64\n hidden_2 = 64\n self.fc1 = nn.Linear(state_size, hidden_1)\n # linear layer (n_hidden -> hidden_2)\n self.fc2 = nn.Linear(hidden_1, hidden_2)\n # linear layer (n_hidden -> 10)\n self.fc3 = nn.Linear(hidden_2, action_size)\n # dropout layer (p=0.2)\n # dropout prevents overfitting of data\n# self.dropout = nn.Dropout(0.2)\n\n def forward(self, x):\n \"\"\"Build a network that maps state -> action values.\"\"\"\n x = F.relu(self.fc1(x))\n # add dropout layer\n# x = self.dropout(x)\n # add hidden layer, with relu activation function\n x = F.relu(self.fc2(x))\n # add dropout layer\n# x = self.dropout(x)\n # add output layer\n x = self.fc3(x)\n return x\n"
] | [
[
"torch.nn.Linear",
"torch.manual_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kelvinkoh0308/addons | [
"75e847bae25fc64b5c08a26f6bc8c669cba5b169"
] | [
"tensorflow_addons/text/crf_test.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for CRF.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_addons import text\nfrom tensorflow_addons.utils import test_utils\n\n\n@test_utils.run_all_in_graph_and_eager_modes\nclass CrfTest(tf.test.TestCase):\n def calculateSequenceScore(self, inputs, transition_params, tag_indices,\n sequence_lengths):\n expected_unary_score = sum(\n inputs[i][tag_indices[i]] for i in range(sequence_lengths))\n expected_binary_score = sum(\n transition_params[tag_indices[i], tag_indices[i + 1]]\n for i in range(sequence_lengths - 1))\n return expected_unary_score + expected_binary_score\n\n def testCrfSequenceScore(self):\n transition_params = np.array([[-3, 5, -2], [3, 4, 1], [1, 2, 1]],\n dtype=np.float32)\n # Test both the length-1 and regular cases.\n sequence_lengths_list = [\n np.array(3, dtype=np.int32),\n np.array(1, dtype=np.int32)\n ]\n inputs_list = [\n np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]],\n dtype=np.float32),\n np.array([[4, 5, -3]], dtype=np.float32),\n ]\n tag_indices_list = [\n np.array([1, 2, 1, 0], dtype=np.int32),\n np.array([1], dtype=np.int32)\n ]\n for sequence_lengths, inputs, tag_indices in zip(\n sequence_lengths_list, inputs_list, tag_indices_list):\n sequence_score = text.crf_sequence_score(\n inputs=tf.expand_dims(inputs, 0),\n tag_indices=tf.expand_dims(tag_indices, 0),\n sequence_lengths=tf.expand_dims(sequence_lengths, 0),\n transition_params=tf.constant(transition_params))\n sequence_score = tf.squeeze(sequence_score, [0])\n\n tf_sequence_score = self.evaluate(sequence_score)\n\n expected_sequence_score = self.calculateSequenceScore(\n inputs, transition_params, tag_indices, sequence_lengths)\n self.assertAllClose(tf_sequence_score, expected_sequence_score)\n\n def testCrfMultiTagSequenceScore(self):\n transition_params = np.array([[-3, 5, -2], [3, 4, 1], [1, 2, 1]],\n dtype=np.float32)\n # Test both the length-1 and regular cases.\n sequence_lengths_list = [\n np.array(3, dtype=np.int32),\n np.array(1, dtype=np.int32)\n ]\n inputs_list = [\n np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]],\n dtype=np.float32),\n np.array([[4, 5, -3]], dtype=np.float32),\n ]\n tag_bitmap_list = [\n np.array([[True, True, False], [True, False, True],\n [False, True, True], [True, False, True]],\n dtype=np.bool),\n np.array([[True, True, False]], dtype=np.bool)\n ]\n for sequence_lengths, inputs, tag_bitmap in zip(\n sequence_lengths_list, inputs_list, tag_bitmap_list):\n sequence_score = text.crf_multitag_sequence_score(\n inputs=tf.expand_dims(inputs, 0),\n tag_bitmap=tf.expand_dims(tag_bitmap, 0),\n sequence_lengths=tf.expand_dims(sequence_lengths, 0),\n transition_params=tf.constant(transition_params))\n 
sequence_score = tf.squeeze(sequence_score, [0])\n tf_sum_sequence_score = self.evaluate(sequence_score)\n all_indices_list = [\n single_index_bitmap.nonzero()[0]\n for single_index_bitmap in tag_bitmap[:sequence_lengths]\n ]\n expected_sequence_scores = [\n self.calculateSequenceScore(inputs, transition_params, indices,\n sequence_lengths)\n for indices in itertools.product(*all_indices_list)\n ]\n expected_log_sum_exp_sequence_scores = np.logaddexp.reduce(\n expected_sequence_scores)\n self.assertAllClose(tf_sum_sequence_score,\n expected_log_sum_exp_sequence_scores)\n\n def testCrfUnaryScore(self):\n inputs = np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]],\n dtype=np.float32)\n for dtype in (np.int32, np.int64):\n tag_indices = np.array([1, 2, 1, 0], dtype=dtype)\n sequence_lengths = np.array(3, dtype=np.int32)\n unary_score = text.crf_unary_score(\n tag_indices=tf.expand_dims(tag_indices, 0),\n sequence_lengths=tf.expand_dims(sequence_lengths, 0),\n inputs=tf.expand_dims(inputs, 0))\n unary_score = tf.squeeze(unary_score, [0])\n tf_unary_score = self.evaluate(unary_score)\n expected_unary_score = sum(\n inputs[i][tag_indices[i]] for i in range(sequence_lengths))\n self.assertAllClose(tf_unary_score, expected_unary_score)\n\n def testCrfBinaryScore(self):\n tag_indices = np.array([1, 2, 1, 0], dtype=np.int32)\n transition_params = np.array([[-3, 5, -2], [3, 4, 1], [1, 2, 1]],\n dtype=np.float32)\n sequence_lengths = np.array(3, dtype=np.int32)\n binary_score = text.crf_binary_score(\n tag_indices=tf.expand_dims(tag_indices, 0),\n sequence_lengths=tf.expand_dims(sequence_lengths, 0),\n transition_params=tf.constant(transition_params))\n binary_score = tf.squeeze(binary_score, [0])\n tf_binary_score = self.evaluate(binary_score)\n expected_binary_score = sum(\n transition_params[tag_indices[i], tag_indices[i + 1]]\n for i in range(sequence_lengths - 1))\n self.assertAllClose(tf_binary_score, expected_binary_score)\n\n def testCrfLogNorm(self):\n transition_params = np.array([[-3, 5, -2], [3, 4, 1], [1, 2, 1]],\n dtype=np.float32)\n # Test both the length-1 and regular cases.\n sequence_lengths_list = [\n np.array(3, dtype=np.int32),\n np.array(1, dtype=np.int64)\n ]\n inputs_list = [\n np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]],\n dtype=np.float32),\n np.array([[3, -1, 3]], dtype=np.float32),\n ]\n tag_indices_list = [\n np.array([1, 2, 1, 0], dtype=np.int32),\n np.array([2], dtype=np.int32)\n ]\n\n for sequence_lengths, inputs, tag_indices in zip(\n sequence_lengths_list, inputs_list, tag_indices_list):\n num_words = inputs.shape[0]\n num_tags = inputs.shape[1]\n all_sequence_scores = []\n\n # Compare the dynamic program with brute force computation.\n for tag_indices in itertools.product(\n range(num_tags), repeat=sequence_lengths):\n tag_indices = list(tag_indices)\n tag_indices.extend([0] * (num_words - sequence_lengths))\n all_sequence_scores.append(\n text.crf_sequence_score(\n inputs=tf.expand_dims(inputs, 0),\n tag_indices=tf.expand_dims(tag_indices, 0),\n sequence_lengths=tf.expand_dims(sequence_lengths, 0),\n transition_params=tf.constant(transition_params)))\n\n brute_force_log_norm = tf.reduce_logsumexp(all_sequence_scores)\n log_norm = text.crf_log_norm(\n inputs=tf.expand_dims(inputs, 0),\n sequence_lengths=tf.expand_dims(sequence_lengths, 0),\n transition_params=tf.constant(transition_params))\n log_norm = tf.squeeze(log_norm, [0])\n tf_brute_force_log_norm, tf_log_norm = self.evaluate(\n [brute_force_log_norm, log_norm])\n\n 
self.assertAllClose(tf_log_norm, tf_brute_force_log_norm)\n\n def testCrfLogNormZeroSeqLength(self):\n \"\"\"Test `crf_log_norm` when `sequence_lengths` contains one or more\n zeros.\"\"\"\n inputs = tf.constant(np.ones([2, 10, 5], dtype=np.float32))\n transition_params = tf.constant(np.ones([5, 5], dtype=np.float32))\n sequence_lengths = tf.constant(np.zeros([2], dtype=np.int32))\n expected_log_norm = np.zeros([2], dtype=np.float32)\n log_norm = text.crf_log_norm(inputs, sequence_lengths,\n transition_params)\n tf_log_norm = self.evaluate(log_norm)\n self.assertAllClose(tf_log_norm, expected_log_norm)\n\n def testCrfLogLikelihood(self):\n inputs = np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]],\n dtype=np.float32)\n transition_params = np.array([[-3, 5, -2], [3, 4, 1], [1, 2, 1]],\n dtype=np.float32)\n sequence_lengths = np.array(3, dtype=np.int32)\n num_words = inputs.shape[0]\n num_tags = inputs.shape[1]\n all_sequence_log_likelihoods = []\n\n # Make sure all probabilities sum to 1.\n for tag_indices in itertools.product(\n range(num_tags), repeat=sequence_lengths):\n tag_indices = list(tag_indices)\n tag_indices.extend([0] * (num_words - sequence_lengths))\n sequence_log_likelihood, _ = text.crf_log_likelihood(\n inputs=tf.expand_dims(inputs, 0),\n tag_indices=tf.expand_dims(tag_indices, 0),\n sequence_lengths=tf.expand_dims(sequence_lengths, 0),\n transition_params=tf.constant(transition_params))\n all_sequence_log_likelihoods.append(sequence_log_likelihood)\n total_log_likelihood = tf.reduce_logsumexp(\n all_sequence_log_likelihoods)\n tf_total_log_likelihood = self.evaluate(total_log_likelihood)\n self.assertAllClose(tf_total_log_likelihood, 0.0)\n\n # check if `transition_params = None` raises an error\n text.crf_log_likelihood(\n inputs=tf.expand_dims(inputs, 0),\n tag_indices=tf.expand_dims(tag_indices, 0),\n sequence_lengths=tf.expand_dims(sequence_lengths, 0))\n\n def testViterbiDecode(self):\n inputs = np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]],\n dtype=np.float32)\n transition_params = np.array([[-3, 5, -2], [3, 4, 1], [1, 2, 1]],\n dtype=np.float32)\n sequence_lengths = np.array(3, dtype=np.int32)\n num_words = inputs.shape[0]\n num_tags = inputs.shape[1]\n\n all_sequence_scores = []\n all_sequences = []\n\n # Compare the dynamic program with brute force computation.\n for tag_indices in itertools.product(\n range(num_tags), repeat=sequence_lengths):\n tag_indices = list(tag_indices)\n tag_indices.extend([0] * (num_words - sequence_lengths))\n all_sequences.append(tag_indices)\n sequence_score = text.crf_sequence_score(\n inputs=tf.expand_dims(inputs, 0),\n tag_indices=tf.expand_dims(tag_indices, 0),\n sequence_lengths=tf.expand_dims(sequence_lengths, 0),\n transition_params=tf.constant(transition_params))\n sequence_score = tf.squeeze(sequence_score, [0])\n all_sequence_scores.append(sequence_score)\n\n tf_all_sequence_scores = self.evaluate(all_sequence_scores)\n\n expected_max_sequence_index = np.argmax(tf_all_sequence_scores)\n expected_max_sequence = all_sequences[expected_max_sequence_index]\n expected_max_score = tf_all_sequence_scores[\n expected_max_sequence_index]\n\n actual_max_sequence, actual_max_score = text.viterbi_decode(\n inputs[:sequence_lengths], transition_params)\n\n self.assertAllClose(actual_max_score, expected_max_score)\n self.assertEqual(actual_max_sequence,\n expected_max_sequence[:sequence_lengths])\n\n def testCrfDecode(self):\n transition_params = np.array([[-3, 5, -2], [3, 4, 1], [1, 2, 1]],\n dtype=np.float32)\n # Test 
both the length-1 and regular cases.\n sequence_lengths_list = [\n np.array(3, dtype=np.int32),\n np.array(1, dtype=np.int64)\n ]\n inputs_list = [\n np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]],\n dtype=np.float32),\n np.array([[-1, 2, 1]], dtype=np.float32),\n ]\n tag_indices_list = [\n np.array([1, 2, 1, 0], dtype=np.int32),\n np.array([2], dtype=np.int32)\n ]\n\n for sequence_lengths, inputs, tag_indices in zip(\n sequence_lengths_list, inputs_list, tag_indices_list):\n num_words = inputs.shape[0]\n num_tags = inputs.shape[1]\n\n all_sequence_scores = []\n all_sequences = []\n\n # Compare the dynamic program with brute force computation.\n for tag_indices in itertools.product(\n range(num_tags), repeat=sequence_lengths):\n tag_indices = list(tag_indices)\n tag_indices.extend([0] * (num_words - sequence_lengths))\n all_sequences.append(tag_indices)\n sequence_score = text.crf_sequence_score(\n inputs=tf.expand_dims(inputs, 0),\n tag_indices=tf.expand_dims(tag_indices, 0),\n sequence_lengths=tf.expand_dims(sequence_lengths, 0),\n transition_params=tf.constant(transition_params))\n sequence_score = tf.squeeze(sequence_score, [0])\n all_sequence_scores.append(sequence_score)\n\n tf_all_sequence_scores = self.evaluate(all_sequence_scores)\n\n expected_max_sequence_index = np.argmax(tf_all_sequence_scores)\n expected_max_sequence = all_sequences[expected_max_sequence_index]\n expected_max_score = tf_all_sequence_scores[\n expected_max_sequence_index]\n\n actual_max_sequence, actual_max_score = text.crf_decode(\n tf.expand_dims(inputs, 0), tf.constant(transition_params),\n tf.expand_dims(sequence_lengths, 0))\n actual_max_sequence = tf.squeeze(actual_max_sequence, [0])\n actual_max_score = tf.squeeze(actual_max_score, [0])\n tf_actual_max_sequence, tf_actual_max_score = self.evaluate(\n [actual_max_sequence, actual_max_score])\n\n self.assertAllClose(tf_actual_max_score, expected_max_score)\n self.assertEqual(\n list(tf_actual_max_sequence[:sequence_lengths]),\n expected_max_sequence[:sequence_lengths])\n\n def testCrfDecodeZeroSeqLength(self):\n \"\"\"Test that crf_decode works when sequence_length contains one or more\n zeros.\"\"\"\n inputs = tf.constant(np.ones([2, 10, 5], dtype=np.float32))\n transition_params = tf.constant(np.ones([5, 5], dtype=np.float32))\n sequence_lengths = tf.constant(np.zeros([2], dtype=np.int32))\n tags, scores = text.crf_decode(inputs, transition_params,\n sequence_lengths)\n tf_tags, tf_scores = self.evaluate([tags, scores])\n self.assertEqual(len(tf_tags.shape), 2)\n self.assertEqual(len(tf_scores.shape), 1)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.constant",
"tensorflow.test.main",
"tensorflow.squeeze",
"numpy.ones",
"tensorflow.expand_dims",
"numpy.argmax",
"numpy.logaddexp.reduce",
"numpy.array",
"numpy.zeros",
"tensorflow.reduce_logsumexp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
CHarpprecht/premise | [
"ead72ab613f083a5a11d57677d876f4ef258ccdd"
] | [
"premise/cement.py"
] | [
"import copy\nimport uuid\nimport numpy as np\nimport wurst\nfrom wurst import searching as ws\nfrom wurst import transformations as wt\nfrom .activity_maps import InventorySet\nfrom .geomap import Geomap\nfrom .utils import *\nfrom datetime import date\n\n\nclass Cement:\n \"\"\"\n Class that modifies clinker and cement production datasets in ecoinvent, mostly based on WBCSD's GNR data.\n :ivar scenario: name of a Remind pathway\n :vartype pathway: str\n\n \"\"\"\n\n def __init__(self, db, model, scenario, iam_data, year, version):\n self.db = db\n self.model = model\n self.scenario = scenario\n self.iam_data = iam_data\n self.year = year\n self.version = version\n self.geo = Geomap(model=model)\n\n self.clinker_ratio_eco = get_clinker_ratio_ecoinvent(version)\n self.clinker_ratio_remind = get_clinker_ratio_remind(self.year)\n self.fuels_lhv = get_lower_heating_values()\n self.fuels_co2 = get_fuel_co2_emission_factors()\n mapping = InventorySet(self.db)\n self.emissions_map = mapping.get_remind_to_ecoinvent_emissions()\n self.fuel_map = mapping.generate_fuel_map()\n\n def fetch_proxies(self, name, ref_prod, relink=False):\n \"\"\"\n Fetch dataset proxies, given a dataset `name` and `reference product`.\n Store a copy for each REMIND region.\n If a REMIND region does not find a fitting ecoinvent location,\n fetch a dataset with a \"RoW\" location.\n Delete original datasets from the database.\n\n :return:\n \"\"\"\n\n d_map = {\n self.geo.ecoinvent_to_iam_location(d[\"location\"]): d[\"location\"]\n for d in ws.get_many(\n self.db,\n ws.equals(\"name\", name),\n ws.equals(\"reference product\", ref_prod),\n )\n }\n\n list_iam_regions = [\n c[1]\n for c in self.geo.geo.keys()\n if type(c) == tuple and c[0].lower() == self.model\n ]\n\n d_iam_to_eco = {r: d_map.get(r, \"RoW\") for r in list_iam_regions}\n\n d_act = {}\n\n for d in d_iam_to_eco:\n try:\n ds = ws.get_one(\n self.db,\n ws.equals(\"name\", name),\n ws.equals(\"reference product\", ref_prod),\n ws.equals(\"location\", d_iam_to_eco[d]),\n )\n\n d_act[d] = wt.copy_to_new_location(ds, d)\n d_act[d][\"code\"] = str(uuid.uuid4().hex)\n\n if \"input\" in d_act[d]:\n d_act[d].pop(\"input\")\n\n if relink:\n d_act[d] = relink_technosphere_exchanges(\n d_act[d], self.db, self.model\n )\n\n except ws.NoResults:\n print(\n \"No dataset {} found for the {} region {}\".format(\n name, self.model.upper(), d\n )\n )\n continue\n\n deleted_markets = [\n (act[\"name\"], act[\"reference product\"], act[\"location\"])\n for act in self.db\n if (act[\"name\"], act[\"reference product\"]) == (name, ref_prod)\n ]\n\n with open(\n DATA_DIR\n / \"logs/log deleted cement datasets {} {} {}-{}.csv\".format(\n self.model, self.scenario, self.year, date.today()\n ),\n \"a\",\n ) as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\", lineterminator=\"\\n\")\n for line in deleted_markets:\n writer.writerow(line)\n\n # Remove old datasets\n self.db = [\n act\n for act in self.db\n if (act[\"name\"], act[\"reference product\"]) != (name, ref_prod)\n ]\n\n return d_act\n\n @staticmethod\n def remove_exchanges(exchanges_dict, list_exc):\n\n keep = lambda x: {\n k: v\n for k, v in x.items()\n if not any(ele in x.get(\"product\", list()) for ele in list_exc)\n }\n\n for r in exchanges_dict:\n exchanges_dict[r][\"exchanges\"] = [\n keep(exc) for exc in exchanges_dict[r][\"exchanges\"]\n ]\n\n return exchanges_dict\n\n def get_suppliers_of_a_region(\n self,\n iam_region,\n ecoinvent_technologies,\n reference_product,\n unit=\"kilogram\",\n 
look_for_locations_in=\"ecoinvent\",\n ):\n \"\"\"\n Return a list of datasets which location and name correspond to the region, name and reference product given,\n respectively.\n\n :param unit: unit of the dataset. If not specified, \"kilogram\" is used.\n :param look_for_locations_in: whether it should look for a supplier in ecoinvent locations or IAM locations.\n :param iam_region: an IAM region\n :type iam_region: str\n :param ecoinvent_technologies: list of names of ecoinvent dataset\n :type ecoinvent_technologies: list\n :param reference_product: reference product\n :type reference_product: str\n :return: list of wurst datasets\n :rtype: list\n \"\"\"\n if look_for_locations_in == \"ecoinvent\":\n return ws.get_many(\n self.db,\n *[\n ws.either(\n *[\n ws.contains(\"name\", supplier)\n for supplier in ecoinvent_technologies\n ]\n ),\n ws.either(\n *[\n ws.equals(\"location\", loc)\n for loc in self.geo.iam_to_ecoinvent_location(iam_region)\n ]\n ),\n ws.equals(\"unit\", unit),\n ws.equals(\"reference product\", reference_product),\n ]\n )\n else:\n return ws.get_many(\n self.db,\n *[\n ws.either(\n *[\n ws.contains(\"name\", supplier)\n for supplier in ecoinvent_technologies\n ]\n ),\n ws.equals(\"location\", look_for_locations_in),\n ws.equals(\"unit\", unit),\n ws.equals(\"reference product\", reference_product),\n ]\n )\n\n @staticmethod\n def get_shares_from_production_volume(ds):\n \"\"\"\n Return shares of supply based on production volumes\n :param ds: list of datasets\n :return: dictionary with (dataset name, dataset location) as keys, shares as values. Shares total 1.\n :rtype: dict\n \"\"\"\n dict_act = {}\n total_production_volume = 0\n for act in ds:\n for exc in ws.production(act):\n dict_act[\n (\n act[\"name\"],\n act[\"location\"],\n act[\"reference product\"],\n act[\"unit\"],\n )\n ] = float(exc[\"production volume\"])\n total_production_volume += float(exc[\"production volume\"])\n\n for d in dict_act:\n if total_production_volume != 0:\n dict_act[d] /= total_production_volume\n else:\n dict_act[d] = 1 / len(dict_act)\n\n return dict_act\n\n def update_pollutant_emissions(self, ds):\n \"\"\"\n Update pollutant emissions based on GAINS data.\n We apply a correction factor defined as being equal to\n the emission level in the year in question, compared\n to 2020\n :return:\n \"\"\"\n\n # Update biosphere exchanges according to GAINS emission values\n for exc in ws.biosphere(\n ds, ws.either(*[ws.contains(\"name\", x) for x in self.emissions_map])\n ):\n remind_emission_label = self.emissions_map[exc[\"name\"]]\n\n if (\n self.model == \"remind\"\n and ds[\"location\"] in self.iam_data.cement_emissions.region\n or ds[\"location\"] == \"World\"\n ):\n correction_factor = (\n self.iam_data.cement_emissions.loc[\n dict(\n region=ds[\"location\"]\n if ds[\"location\"] != \"World\"\n else \"CHA\",\n pollutant=remind_emission_label,\n )\n ].interp(year=self.year)\n / self.iam_data.cement_emissions.loc[\n dict(\n region=ds[\"location\"]\n if ds[\"location\"] != \"World\"\n else \"CHA\",\n pollutant=remind_emission_label,\n year=2020,\n )\n ]\n ).values.item(0)\n\n elif (\n self.model == \"image\"\n and self.geo.iam_to_iam_region(ds[\"location\"])\n in self.iam_data.cement_emissions.region\n ):\n correction_factor = (\n self.iam_data.cement_emissions.loc[\n dict(\n region=self.geo.iam_to_iam_region(ds[\"location\"]),\n pollutant=remind_emission_label,\n )\n ].interp(year=self.year)\n / self.iam_data.cement_emissions.loc[\n dict(\n 
region=self.geo.iam_to_iam_region(ds[\"location\"]),\n pollutant=remind_emission_label,\n year=2020,\n )\n ]\n ).values.item(0)\n else:\n correction_factor = (\n self.iam_data.cement_emissions.loc[\n dict(\n region=self.geo.ecoinvent_to_iam_location(ds[\"location\"]),\n pollutant=remind_emission_label,\n )\n ].interp(year=self.year)\n / self.iam_data.cement_emissions.loc[\n dict(\n region=self.geo.ecoinvent_to_iam_location(ds[\"location\"]),\n pollutant=remind_emission_label,\n year=2020,\n )\n ]\n ).values.item(0)\n\n if correction_factor != 0 and ~np.isnan(correction_factor):\n if exc[\"amount\"] == 0:\n wurst.rescale_exchange(\n exc, correction_factor / 1, remove_uncertainty=True\n )\n else:\n wurst.rescale_exchange(exc, correction_factor)\n\n exc[\n \"comment\"\n ] = \"This exchange has been modified based on GAINS projections for the cement sector by `premise`.\"\n return ds\n\n def build_clinker_market_datasets(self):\n # Fetch clinker market activities and store them in a dictionary\n return self.fetch_proxies(\"market for clinker\", \"clinker\", relink=True)\n\n def fuel_efficiency_factor(self, loc):\n \"\"\"\n\n :param loc: location of the exchange\n :return: correction factor\n :rtype: float\n \"\"\"\n\n if self.model == \"remind\":\n # REMIND\n final_energy = [\"FE|Industry|Cement\"]\n prod = \"Production|Industry|Cement\"\n else:\n # IMAGE\n final_energy = [\n \"Final Energy|Industry|Cement|Electricity\",\n \"Final Energy|Industry|Cement|Gases\",\n \"Final Energy|Industry|Cement|Heat\",\n \"Final Energy|Industry|Cement|Hydrogen\",\n \"Final Energy|Industry|Cement|Liquids\",\n \"Final Energy|Industry|Cement|Solids\",\n ]\n prod = \"Production|Cement\"\n\n # sometimes, the energy consumption values are not reported for the region \"World\"\n # in such case, we then look at the sum of all the regions\n if (\n self.iam_data.data.loc[dict(region=loc, variables=final_energy)]\n .interp(year=self.year)\n .sum()\n == 0\n ):\n loc = self.iam_data.data.region.values\n\n eff_factor = (\n (\n self.iam_data.data.loc[\n dict(\n region=[loc] if isinstance(loc, str) else loc,\n variables=final_energy,\n )\n ]\n .interp(year=self.year)\n .sum(dim=[\"region\", \"variables\"])\n / self.iam_data.data.loc[\n dict(region=[loc] if isinstance(loc, str) else loc, variables=prod,)\n ]\n .interp(year=self.year)\n .sum(dim=\"region\")\n )\n / (\n self.iam_data.data.loc[\n dict(\n region=[loc] if isinstance(loc, str) else loc,\n variables=final_energy,\n year=2020,\n )\n ].sum(dim=[\"region\", \"variables\"])\n / self.iam_data.data.loc[\n dict(\n region=[loc] if isinstance(loc, str) else loc,\n variables=prod,\n year=2020,\n )\n ].sum(dim=\"region\")\n )\n ).values\n\n # we assume efficiency cannot get worse over time\n if eff_factor == np.nan or eff_factor == np.inf or eff_factor > 1:\n eff_factor = 1\n\n return eff_factor\n\n def get_carbon_capture_rate(self, loc):\n \"\"\"\n Returns the carbon capture rate as indicated by the IAM\n It is calculated as CO2 captured / (CO2 captured + CO2 emitted)\n\n :param loc: location of the dataset\n :return: rate of carbon capture\n :rtype: float\n \"\"\"\n\n if self.model == \"remind\":\n if all(\n x in self.iam_data.data.variables.values\n for x in [\n \"Emi|CCO2|FFaI|Industry|Cement\",\n \"Emi|CO2|FFaI|Industry|Cement\",\n ]\n ):\n rate = (\n self.iam_data.data.sel(\n variables=\"Emi|CCO2|FFaI|Industry|Cement\", region=loc\n ).interp(year=self.year)\n / self.iam_data.data.sel(\n variables=[\n \"Emi|CCO2|FFaI|Industry|Cement\",\n \"Emi|CO2|FFaI|Industry|Cement\",\n 
],\n region=loc,\n )\n .interp(year=self.year)\n .sum(dim=\"variables\")\n ).values\n else:\n rate = 0\n else:\n if all(\n x in self.iam_data.data.variables.values\n for x in [\n \"Emissions|CO2|Industry|Cement|Gross\",\n \"Emissions|CO2|Industry|Cement|Sequestered\",\n ]\n ):\n # let's check that we have values\n # sometimes, values are not reported for the \"World \" region\n\n rate = (\n self.iam_data.data.sel(\n variables=\"Emissions|CO2|Industry|Cement|Sequestered\",\n region=[loc] if loc != \"World\" else [l for l in self.iam_data.data.region.values],\n ).interp(year=self.year).sum(dim=\"region\")\n / self.iam_data.data.sel(\n variables=[\n \"Emissions|CO2|Industry|Cement|Gross\",\n \"Emissions|CO2|Industry|Cement|Sequestered\",\n ],\n region=[loc] if loc != \"World\" else [l for l in self.iam_data.data.region.values],\n )\n .interp(year=self.year)\n .sum(dim=[\"variables\", \"region\"])\n ).values\n else:\n rate = 0\n\n return rate\n\n def build_clinker_production_datasets(self):\n \"\"\"\n Builds clinker production datasets for each IAM region.\n If `industry_module_present`, the kiln efficiency improvement follows projections from the IAM model\n # If not, it follows projections from the IEA\n Add CO2 capture and Storage if needed.\n Source for CO2 capture and compression: https://www.sciencedirect.com/science/article/pii/S1750583613001230?via%3Dihub#fn0040\n :return: a dictionary with IAM regions as keys and clinker production datasets as values.\n :rtype: dict\n \"\"\"\n\n # Fetch clinker production activities and store them in a dictionary\n d_act_clinker = self.fetch_proxies(\"clinker production\", \"clinker\", relink=True)\n\n # Fuel exchanges to remove\n list_fuels = [\n \"diesel\",\n \"coal\",\n \"lignite\",\n \"coke\",\n \"fuel\",\n \"meat\",\n \"gas\",\n \"oil\",\n \"electricity\",\n \"wood\",\n \"waste\",\n ]\n\n # we first create current clinker production activities for each region\n # from GNR data, to get a new fuel efficiency and mix than what is\n # currently in ecoinvent\n\n # Remove fuel and electricity exchanges in each activity\n d_act_clinker = self.remove_exchanges(d_act_clinker, list_fuels)\n\n for k, v in d_act_clinker.items():\n\n # Production volume by kiln type\n energy_input_per_kiln_type = self.iam_data.gnr_data.sel(\n region=self.geo.iam_to_iam_region(k)\n if self.model == \"image\"\n else k,\n variables=[\n v\n for v in self.iam_data.gnr_data.variables.values\n if \"Production volume share\" in v\n ],\n ).clip(0, 1)\n # Energy input per ton of clinker, in MJ, per kiln type\n energy_input_per_kiln_type /= energy_input_per_kiln_type.sum(axis=0)\n\n energy_eff_per_kiln_type = self.iam_data.gnr_data.sel(\n region=self.geo.iam_to_iam_region(k)\n if self.model == \"image\"\n else k,\n variables=[\n v\n for v in self.iam_data.gnr_data.variables.values\n if \"Thermal energy consumption\" in v\n ],\n )\n\n # Weighted average energy input per ton clinker, in MJ\n energy_input_per_ton_clinker = (\n energy_input_per_kiln_type.values * energy_eff_per_kiln_type.values\n )\n\n # the correction factor applied to all fuel/electricity input is\n # equal to the ratio fuel/output in the year in question\n # divided by the ratio fuel/output in 2020\n\n correction_factor = self.fuel_efficiency_factor(v[\"location\"])\n energy_input_per_ton_clinker *= correction_factor\n\n # Fuel mix (waste, biomass, fossil)\n fuel_mix = self.iam_data.gnr_data.sel(\n variables=[\n \"Share waste fuel\",\n \"Share biomass fuel\",\n \"Share fossil fuel\",\n ],\n 
region=self.geo.iam_to_iam_region(k)\n if self.model == \"image\"\n else k,\n ).clip(0, 1)\n\n fuel_mix /= fuel_mix.sum(axis=0)\n\n # Calculate quantities (in kg) of fuel, per type of fuel, per ton of clinker\n # MJ per ton of clinker * fuel mix * (1 / lower heating value)\n fuel_qty_per_type = (\n energy_input_per_ton_clinker.sum()\n * fuel_mix\n * 1\n / np.array(\n [\n float(self.fuels_lhv[\"waste\"]),\n float(self.fuels_lhv[\"wood pellet\"]),\n float(self.fuels_lhv[\"hard coal\"]),\n ]\n )\n )\n\n fuel_fossil_co2_per_type = (\n energy_input_per_ton_clinker.sum()\n * fuel_mix\n * np.array(\n [\n (\n self.fuels_co2[\"waste\"][\"co2\"]\n * (1 - self.fuels_co2[\"waste\"][\"bio_share\"])\n ),\n (\n self.fuels_co2[\"wood pellet\"][\"co2\"]\n * (1 - self.fuels_co2[\"wood pellet\"][\"bio_share\"])\n ),\n (\n self.fuels_co2[\"hard coal\"][\"co2\"]\n * (1 - self.fuels_co2[\"hard coal\"][\"bio_share\"])\n ),\n ]\n )\n )\n\n fuel_biogenic_co2_per_type = (\n energy_input_per_ton_clinker.sum()\n * fuel_mix\n * np.array(\n [\n (\n self.fuels_co2[\"waste\"][\"co2\"]\n * (self.fuels_co2[\"waste\"][\"bio_share\"])\n ),\n (\n self.fuels_co2[\"wood pellet\"][\"co2\"]\n * (self.fuels_co2[\"wood pellet\"][\"bio_share\"])\n ),\n (\n self.fuels_co2[\"hard coal\"][\"co2\"]\n * (self.fuels_co2[\"hard coal\"][\"bio_share\"])\n ),\n ]\n )\n )\n\n # Append it to the dataset exchanges\n new_exchanges = []\n\n for f, fuel in enumerate(\n [\n (\"waste\", \"waste plastic, mixture\"),\n (\"wood pellet\", \"wood pellet, measured as dry mass\"),\n (\"hard coal\", \"hard coal\"),\n ]\n ):\n # Select waste fuel providers, fitting the IAM region\n # Fetch respective shares based on production volumes\n fuel_suppliers = self.get_shares_from_production_volume(\n self.get_suppliers_of_a_region(\n k, self.fuel_map[fuel[0]], fuel[1]\n )\n )\n if len(fuel_suppliers) == 0:\n fuel_suppliers = self.get_shares_from_production_volume(\n self.get_suppliers_of_a_region(\n k, self.fuel_map[fuel[0]], fuel[1], look_for_locations_in=\"ecoinvent\"\n )\n )\n\n if len(fuel_suppliers) == 0:\n loc = \"World\"\n fuel_suppliers = self.get_shares_from_production_volume(\n self.get_suppliers_of_a_region(\n loc, self.fuel_map[fuel[0]], fuel[1], look_for_locations_in=\"ecoinvent\"\n )\n )\n\n for s, supplier in enumerate(fuel_suppliers):\n new_exchanges.append(\n {\n \"uncertainty type\": 0,\n \"loc\": 1,\n \"amount\": (\n fuel_suppliers[supplier]\n * fuel_qty_per_type[f].values\n )\n / 1000,\n \"type\": \"technosphere\",\n \"production volume\": 0,\n \"product\": supplier[2],\n \"name\": supplier[0],\n \"unit\": supplier[3],\n \"location\": supplier[1],\n }\n )\n\n v[\"exchanges\"].extend(new_exchanges)\n\n v[\"exchanges\"] = [v for v in v[\"exchanges\"] if v]\n\n # Carbon capture rate: share of capture of total CO2 emitted\n carbon_capture_rate = self.get_carbon_capture_rate(v[\"location\"])\n\n # Update fossil CO2 exchange, add 525 kg of fossil CO_2 from calcination\n fossil_co2_exc = [\n e for e in v[\"exchanges\"] if e[\"name\"] == \"Carbon dioxide, fossil\"\n ][0]\n\n fossil_co2_exc[\"amount\"] = (\n (fuel_fossil_co2_per_type.sum().values + 525) / 1000\n ) * (1 - carbon_capture_rate)\n fossil_co2_exc[\"uncertainty type\"] = 0\n\n try:\n # Update biogenic CO2 exchange\n biogenic_co2_exc = [\n e\n for e in v[\"exchanges\"]\n if e[\"name\"] == \"Carbon dioxide, non-fossil\"\n ][0]\n biogenic_co2_exc[\"amount\"] = (\n fuel_biogenic_co2_per_type.sum().values / 1000\n ) * (1 - carbon_capture_rate)\n biogenic_co2_exc[\"uncertainty type\"] = 0\n except 
IndexError:\n # There isn't a biogenic CO2 emissions exchange\n biogenic_co2_exc = {\n \"uncertainty type\": 0,\n \"loc\": 1,\n \"amount\": (fuel_biogenic_co2_per_type.sum().values / 1000)\n * (1 - carbon_capture_rate),\n \"type\": \"biosphere\",\n \"production volume\": 0,\n \"name\": \"Carbon dioxide, non-fossil\",\n \"unit\": \"kilogram\",\n \"input\": (\"biosphere3\", \"eba59fd6-f37e-41dc-9ca3-c7ea22d602c7\"),\n \"categories\": (\"air\",),\n }\n v[\"exchanges\"].append(biogenic_co2_exc)\n\n # add CCS-related dataset\n if carbon_capture_rate > 0:\n\n ds = ws.get_one(\n self.db,\n ws.equals(\n \"name\",\n \"CO2 capture, at cement production plant, with underground storage, post, 200 km\",\n ),\n ws.equals(\"location\", \"RER\"),\n )\n\n ccs = wt.copy_to_new_location(ds, v[\"location\"])\n ccs[\"code\"] = str(uuid.uuid4().hex)\n ccs = relink_technosphere_exchanges(ccs, self.db, self.model)\n\n if \"input\" in ccs:\n ccs.pop(\"input\")\n\n # we first fix the biogenic CO2 permanent storage\n # share = sum of biogenic fuel emissions / (sum of fossil fuel emission\n # + sum of biogenic fuel emissions + 525 kg from calcination)\n for exc in ws.biosphere(\n ccs,\n ws.equals(\"name\", \"Carbon dioxide, to soil or biomass stock\"),\n ):\n exc[\"amount\"] = (fuel_biogenic_co2_per_type.sum() / (\n fuel_fossil_co2_per_type.sum()\n + fuel_biogenic_co2_per_type.sum()\n + 525\n )).values.item(0)\n\n # 0.11 kg CO2 leaks per kg captured\n # we need to align the CO2 composition with\n # the CO2 composition of the cement plant\n for exc in ws.biosphere(\n ccs,\n ws.equals(\"name\", \"Carbon dioxide, from soil or biomass stock\"),\n ):\n exc[\"amount\"] = (\n fuel_biogenic_co2_per_type.sum()\n / (\n fuel_fossil_co2_per_type.sum()\n + fuel_biogenic_co2_per_type.sum()\n + 525\n )\n ).values.item(0) * 0.11\n\n for exc in ws.biosphere(\n ccs, ws.equals(\"name\", \"Carbon dioxide, fossil\")\n ):\n exc[\"amount\"] = 0.11 - (\n (\n fuel_biogenic_co2_per_type.sum()\n / (\n fuel_fossil_co2_per_type.sum()\n + fuel_biogenic_co2_per_type.sum()\n + 525\n )\n )\n * 0.11\n ).values.item(0)\n\n # we adjust the heat needs by subtraction 3.66 MJ with what\n # the cement plant is expected to produce as excess heat\n\n # Heat, as steam: 3.66 MJ/kg CO2 captured, minus excess heat generated on site\n excess_heat_generation = (\n self.iam_data.gnr_data.sel(\n variables=\"Share of recovered energy, per ton clinker\",\n region=self.geo.iam_to_iam_region(v[\"location\"])\n if self.model == \"image\"\n else v[\"location\"],\n ).values\n * (energy_input_per_ton_clinker.sum() / 1000)\n )\n\n for exc in ws.technosphere(\n ccs, ws.contains(\"name\", \"steam production\")\n ):\n exc[\"amount\"] = np.clip(3.66 - excess_heat_generation, 0, 3.66)\n\n # then, we need to find local suppliers of electricity, water, steam, etc.\n relink_technosphere_exchanges(ccs, self.db, self.model)\n\n # we add this new dataset to the database\n self.db.append(ccs)\n\n # add an input from this CCS dataset in the clinker dataset\n ccs_exc = {\n \"uncertainty type\": 0,\n \"loc\": 0,\n \"amount\": (\n (\n fuel_fossil_co2_per_type.sum().values\n + fuel_biogenic_co2_per_type.sum().values\n )\n / 1000\n )\n * carbon_capture_rate,\n \"type\": \"technosphere\",\n \"production volume\": 0,\n \"name\": \"CO2 capture, at cement production plant, with underground storage, post, 200 km\",\n \"unit\": \"kilogram\",\n \"location\": v[\"location\"],\n \"product\": \"CO2, captured and stored\",\n }\n v[\"exchanges\"].append(ccs_exc)\n\n v[\"exchanges\"] = [v for v in 
v[\"exchanges\"] if v]\n\n v[\"comment\"] = (\n \"WARNING: Dataset modified by `premise` based on WBCSD's GNR data and IAM projections \"\n + \" for the cement industry.\\n\"\n + \"Calculated energy input per kg clinker: {} MJ/kg clinker.\\n\".format(\n np.round(energy_input_per_ton_clinker.sum(), 1) / 1000\n )\n + \"Improvement of energy input per kg clinker compared to 2020: {} %.\\n\".format(\n (correction_factor - 1) * 100\n )\n + \"Share of biomass fuel energy-wise: {} pct.\\n\".format(\n int(fuel_mix[1] * 100)\n )\n + \"Share of waste fuel energy-wise: {} pct.\\n\".format(\n int(fuel_mix[0] * 100)\n )\n + \"Share of fossil carbon in waste fuel energy-wise: {} pct.\\n\".format(\n int(self.fuels_co2[\"waste\"][\"bio_share\"] * 100)\n )\n + \"Share of fossil CO2 emissions from fuel combustion: {} pct.\\n\".format(\n int(\n (\n fuel_fossil_co2_per_type.sum()\n / (fuel_fossil_co2_per_type.sum() + 525)\n )\n * 100\n )\n )\n + \"Share of fossil CO2 emissions from calcination: {} pct.\\n\".format(\n 100\n - int(\n (\n fuel_fossil_co2_per_type.sum()\n / np.sum(fuel_fossil_co2_per_type.sum() + 525)\n )\n * 100\n )\n )\n + \"Rate of carbon capture: {} pct.\\n\".format(\n int(carbon_capture_rate * 100)\n )\n ) + v[\"comment\"]\n\n\n # TODO: currently, uses the relative improvement as given by GAINS in reference to 2020\n print(\n \"Adjusting emissions of hot pollutants for clinker production datasets...\"\n )\n d_act_clinker = {\n k: self.update_pollutant_emissions(v) for k, v in d_act_clinker.items()\n }\n\n return d_act_clinker\n\n def relink_datasets(self, name, ref_product):\n \"\"\"\n For a given dataset name, change its location to an IAM location,\n to effectively link the newly built dataset(s).\n\n :param ref_product:\n :param name: dataset name\n :type name: str\n \"\"\"\n\n list_ds = [\n (ds[\"name\"], ds[\"reference product\"], ds[\"location\"]) for ds in self.db\n ]\n\n for act in self.db:\n excs = [\n exc\n for exc in act[\"exchanges\"]\n if (exc[\"name\"], exc.get(\"product\")) == (name, ref_product)\n and exc[\"type\"] == \"technosphere\"\n ]\n\n amount = 0\n for exc in excs:\n amount += exc[\"amount\"]\n act[\"exchanges\"].remove(exc)\n\n if amount > 0:\n new_exc = {\n \"name\": name,\n \"product\": ref_product,\n \"amount\": amount,\n \"type\": \"technosphere\",\n \"unit\": \"kilogram\",\n }\n\n if (name, ref_product, act[\"location\"]) in list_ds:\n new_exc[\"location\"] = act[\"location\"]\n else:\n try:\n new_loc = self.geo.ecoinvent_to_iam_location(act[\"location\"])\n except KeyError:\n new_loc = \"\"\n\n if (name, ref_product, new_loc) in list_ds:\n new_exc[\"location\"] = new_loc\n else:\n # new locations in ei3.7, not yet defined in `constructive_geometries`\n if act[\"location\"] in (\n \"North America without Quebec\",\n \"US only\",\n ):\n new_loc = self.geo.ecoinvent_to_iam_location(\"US\")\n new_exc[\"location\"] = new_loc\n\n elif act[\"location\"] in (\"RoW\", \"GLO\"):\n new_loc = self.geo.ecoinvent_to_iam_location(\"CN\")\n new_exc[\"location\"] = new_loc\n\n elif act[\"location\"] in (\n \"RER w/o RU\",\n \"WECC\",\n \"UCTE without Germany\",\n ):\n new_loc = self.geo.ecoinvent_to_iam_location(\"RER\")\n new_exc[\"location\"] = new_loc\n\n else:\n print(\n \"Issue with {} used in {}: cannot find the IAM equivalent for \"\n \"the location {}\".format(\n name, act[\"name\"], act[\"location\"]\n )\n )\n\n act[\"exchanges\"].append(new_exc)\n\n def adjust_clinker_ratio(self, d_act):\n \"\"\" Adjust the cement suppliers composition for \"cement, unspecified\", in 
order to reach\n the average clinker-to-cement ratio given by the IAM.\n\n The supply of the cement with the highest clinker-to-cement ratio is decreased by 1% to the favor of\n the supply of the cement with the lowest clinker-to-cement ratio, and the average clinker-to-cement ratio\n is calculated.\n\n This operation is repeated until the average clinker-to-cement ratio aligns with that given by the IAM.\n When the supply of the cement with the highest clinker-to-cement ratio goes below 1%,\n the cement with the second highest clinker-to-cement ratio becomes affected and so forth.\n\n \"\"\"\n\n for d in d_act:\n\n ratio_to_reach = self.clinker_ratio_remind.sel(\n dict(\n region=self.geo.iam_to_iam_region(d) if self.model == \"image\" else d\n )\n ).values\n\n share = []\n ratio = []\n\n for exc in d_act[d][\"exchanges\"]:\n if \"cement\" in exc[\"product\"] and exc[\"type\"] == \"technosphere\":\n share.append(exc[\"amount\"])\n ratio.append(self.clinker_ratio_eco[(exc[\"name\"], exc[\"location\"])])\n\n share = np.array(share)\n ratio = np.array(ratio)\n\n average_ratio = (share * ratio).sum()\n\n iteration = 0\n while average_ratio > ratio_to_reach and iteration < 100:\n share[share == 0] = np.nan\n\n ratio = np.where(share >= 0.001, ratio, np.nan)\n\n highest_ratio = np.nanargmax(ratio)\n lowest_ratio = np.nanargmin(ratio)\n\n share[highest_ratio] -= 0.01\n share[lowest_ratio] += 0.01\n\n average_ratio = (np.nan_to_num(ratio) * np.nan_to_num(share)).sum()\n iteration += 1\n\n share = np.nan_to_num(share)\n\n count = 0\n for exc in d_act[d][\"exchanges\"]:\n if \"cement\" in exc[\"product\"] and exc[\"type\"] == \"technosphere\":\n exc[\"amount\"] = share[count]\n count += 1\n\n return d_act\n\n def update_cement_production_datasets(self, name, ref_prod):\n \"\"\"\n Update electricity use (mainly for grinding).\n\n :return:\n \"\"\"\n # Fetch proxies\n # Delete old datasets\n d_act_cement = self.fetch_proxies(name, ref_prod)\n # Update electricity use\n d_act_cement = self.update_electricity_exchanges(d_act_cement)\n\n return d_act_cement\n\n def update_electricity_exchanges(self, d_act):\n \"\"\"\n Update electricity exchanges in cement production datasets.\n Electricity consumption equals electricity use minus on-site electricity generation from excess heat recovery.\n\n :return:\n \"\"\"\n d_act = self.remove_exchanges(d_act, [\"electricity\"])\n\n for act in d_act:\n\n new_exchanges = []\n electricity_needed = (\n self.iam_data.gnr_data.loc[\n dict(\n variables=\"Power consumption\",\n region=self.geo.iam_to_iam_region(act)\n if self.model == \"image\"\n else act,\n )\n ].values\n / 1000\n )\n electricity_recovered = (\n self.iam_data.gnr_data.loc[\n dict(\n variables=\"Power generation\",\n region=self.geo.iam_to_iam_region(act)\n if self.model == \"image\"\n else act,\n )\n ].values\n / 1000\n )\n\n electricity_suppliers = self.get_shares_from_production_volume(\n self.get_suppliers_of_a_region(\n iam_region=act,\n ecoinvent_technologies=[\"electricity, medium voltage\"],\n reference_product=\"electricity, medium voltage\",\n unit=\"kilowatt hour\",\n look_for_locations_in=act,\n )\n )\n\n if len(electricity_suppliers) == 0:\n electricity_suppliers = self.get_shares_from_production_volume(\n self.get_suppliers_of_a_region(\n iam_region=act,\n ecoinvent_technologies=[\"electricity, medium voltage\"],\n reference_product=\"electricity, medium voltage\",\n unit=\"kilowatt hour\",\n look_for_locations_in=\"ecoinvent\",\n )\n )\n\n for s, supplier in 
enumerate(electricity_suppliers):\n share = electricity_suppliers[supplier]\n new_exchanges.append(\n {\n \"uncertainty type\": 0,\n \"loc\": 1,\n \"amount\": (electricity_needed - electricity_recovered) * share,\n \"type\": \"technosphere\",\n \"production volume\": 0,\n \"product\": supplier[2],\n \"name\": supplier[0],\n \"unit\": supplier[3],\n \"location\": supplier[1],\n }\n )\n\n d_act[act][\"exchanges\"].extend(new_exchanges)\n d_act[act][\"exchanges\"] = [v for v in d_act[act][\"exchanges\"] if v]\n\n d_act[act][\"comment\"] = (\n \"WARNING: Dataset modified by `premise` based on WBCSD's GNR data and 2018 IEA roadmap for the cement industry.\\n \"\n + \"Electricity consumption per kg cement: {} kWh.\\n\".format(\n electricity_needed\n )\n + \"Of which {} kWh were generated from on-site waste heat recovery.\\n\".format(\n electricity_recovered\n )\n ) + d_act[act][\"comment\"]\n\n return d_act\n\n def add_datasets_to_database(self):\n\n print(\"\\nStart integration of cement data...\\n\")\n\n print(\n \"The validity of the datasets produced from the integration of the cement sector is not yet fully tested.\\n\"\n \"Consider the results with caution.\\n\"\n )\n\n print(\"Log of deleted cement datasets saved in {}\".format(DATA_DIR / \"logs\"))\n print(\"Log of created cement datasets saved in {}\".format(DATA_DIR / \"logs\"))\n\n if not os.path.exists(DATA_DIR / \"logs\"):\n os.makedirs(DATA_DIR / \"logs\")\n\n with open(\n DATA_DIR\n / \"logs/log deleted cement datasets {} {} {}-{}.csv\".format(\n self.model, self.scenario, self.year, date.today()\n ),\n \"w\",\n ) as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\", lineterminator=\"\\n\")\n writer.writerow([\"dataset name\", \"reference product\", \"location\"])\n\n with open(\n DATA_DIR\n / \"logs/log created cement datasets {} {} {}-{}.csv\".format(\n self.model, self.scenario, self.year, date.today()\n ),\n \"w\",\n ) as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\", lineterminator=\"\\n\")\n writer.writerow([\"dataset name\", \"reference product\", \"location\"])\n\n created_datasets = list()\n\n print(\"\\nCreate new clinker production datasets and delete old datasets\")\n clinker_prod_datasets = [\n d\n for d in self.build_clinker_production_datasets().values()\n ]\n self.db.extend(clinker_prod_datasets)\n\n created_datasets.extend(\n [\n (act[\"name\"], act[\"reference product\"], act[\"location\"])\n for act in clinker_prod_datasets\n ]\n )\n\n print(\"\\nCreate new clinker market datasets and delete old datasets\")\n clinker_market_datasets = [\n d for d in self.build_clinker_market_datasets().values()\n ]\n self.db.extend(clinker_market_datasets)\n\n created_datasets.extend(\n [\n (act[\"name\"], act[\"reference product\"], act[\"location\"])\n for act in clinker_market_datasets\n ]\n )\n\n print('Adjust clinker-to-cement ratio in \"unspecified cement\" datasets')\n\n if self.version == 3.5:\n name = \"market for cement, unspecified\"\n ref_prod = \"cement, unspecified\"\n\n else:\n name = \"cement, all types to generic market for cement, unspecified\"\n ref_prod = \"cement, unspecified\"\n\n act_cement_unspecified = self.fetch_proxies(name, ref_prod)\n\n act_cement_unspecified = self.adjust_clinker_ratio(act_cement_unspecified)\n self.db.extend([v for v in act_cement_unspecified.values()])\n\n created_datasets.extend(\n [\n (act[\"name\"], act[\"reference product\"], act[\"location\"])\n for act in act_cement_unspecified.values()\n ]\n )\n\n print(\n \"\\nCreate new cement production datasets and 
adjust electricity consumption\"\n )\n\n if self.version == 3.5:\n\n for i in (\n (\n \"market for cement, alternative constituents 21-35%\",\n \"cement, alternative constituents 21-35%\",\n ),\n (\n \"market for cement, alternative constituents 6-20%\",\n \"cement, alternative constituents 6-20%\",\n ),\n (\n \"market for cement, blast furnace slag 18-30% and 18-30% other alternative constituents\",\n \"cement, blast furnace slag 18-30% and 18-30% other alternative constituents\",\n ),\n (\n \"market for cement, blast furnace slag 25-70%, US only\",\n \"cement, blast furnace slag 25-70%, US only\",\n ),\n (\n \"market for cement, blast furnace slag 31-50% and 31-50% other alternative constituents\",\n \"cement, blast furnace slag 31-50% and 31-50% other alternative constituents\",\n ),\n (\n \"market for cement, blast furnace slag 36-65%, non-US\",\n \"cement, blast furnace slag 36-65%, non-US\",\n ),\n (\n \"market for cement, blast furnace slag 5-25%, US only\",\n \"cement, blast furnace slag 5-25%, US only\",\n ),\n (\n \"market for cement, blast furnace slag 70-100%, non-US\",\n \"cement, blast furnace slag 70-100%, non-US\",\n ),\n (\n \"market for cement, blast furnace slag 70-100%, US only\",\n \"cement, blast furnace slag 70-100%, US only\",\n ),\n (\n \"market for cement, blast furnace slag 81-95%, non-US\",\n \"cement, blast furnace slag 81-95%, non-US\",\n ),\n (\n \"market for cement, blast furnace slag, 66-80%, non-US\",\n \"cement, blast furnace slag, 66-80%, non-US\",\n ),\n (\"market for cement, Portland\", \"cement, Portland\"),\n (\n \"market for cement, pozzolana and fly ash 11-35%, non-US\",\n \"cement, pozzolana and fly ash 11-35%, non-US\",\n ),\n (\n \"market for cement, pozzolana and fly ash 15-40%, US only\",\n \"cement, pozzolana and fly ash 15-40%, US only\",\n ),\n (\n \"market for cement, pozzolana and fly ash 36-55%,non-US\",\n \"cement, pozzolana and fly ash 36-55%,non-US\",\n ),\n (\n \"market for cement, pozzolana and fly ash 5-15%, US only\",\n \"cement, pozzolana and fly ash 5-15%, US only\",\n ),\n ):\n act_cement = self.fetch_proxies(i[0], i[1])\n self.db.extend([v for v in act_cement.values()])\n created_datasets.extend(\n [\n (act[\"name\"], act[\"reference product\"], act[\"location\"])\n for act in act_cement.values()\n ]\n )\n\n self.relink_datasets(i[0], i[1])\n\n for i in (\n (\n \"cement production, alternative constituents 21-35%\",\n \"cement, alternative constituents 21-35%\",\n ),\n (\n \"cement production, alternative constituents 6-20%\",\n \"cement, alternative constituents 6-20%\",\n ),\n (\n \"cement production, blast furnace slag 18-30% and 18-30% other alternative constituents\",\n \"cement, blast furnace slag 18-30% and 18-30% other alternative constituents\",\n ),\n (\n \"cement production, blast furnace slag 25-70%, US only\",\n \"cement, blast furnace slag 25-70%, US only\",\n ),\n (\n \"cement production, blast furnace slag 31-50% and 31-50% other alternative constituents\",\n \"cement, blast furnace slag 31-50% and 31-50% other alternative constituents\",\n ),\n (\n \"cement production, blast furnace slag 36-65%, non-US\",\n \"cement, blast furnace slag 36-65%, non-US\",\n ),\n (\n \"cement production, blast furnace slag 5-25%, US only\",\n \"cement, blast furnace slag 5-25%, US only\",\n ),\n (\n \"cement production, blast furnace slag 70-100%, non-US\",\n \"cement, blast furnace slag 70-100%, non-US\",\n ),\n (\n \"cement production, blast furnace slag 70-100%, US only\",\n \"cement, blast furnace slag 70-100%, US only\",\n 
),\n (\n \"cement production, blast furnace slag 81-95%, non-US\",\n \"cement, blast furnace slag 81-95%, non-US\",\n ),\n (\n \"cement production, blast furnace slag, 66-80%, non-US\",\n \"cement, blast furnace slag, 66-80%, non-US\",\n ),\n (\"cement production, Portland\", \"cement, Portland\"),\n (\n \"cement production, pozzolana and fly ash 11-35%, non-US\",\n \"cement, pozzolana and fly ash 11-35%, non-US\",\n ),\n (\n \"cement production, pozzolana and fly ash 15-40%, US only\",\n \"cement, pozzolana and fly ash 15-40%, US only\",\n ),\n (\n \"cement production, pozzolana and fly ash 36-55%,non-US\",\n \"cement, pozzolana and fly ash 36-55%,non-US\",\n ),\n (\n \"cement production, pozzolana and fly ash 5-15%, US only\",\n \"cement, pozzolana and fly ash 5-15%, US only\",\n ),\n ):\n act_cement = self.update_cement_production_datasets(i[0], i[1])\n self.db.extend([v for v in act_cement.values()])\n\n created_datasets.extend(\n [\n (act[\"name\"], act[\"reference product\"], act[\"location\"])\n for act in act_cement.values()\n ]\n )\n self.relink_datasets(i[0], i[1])\n\n print(\"\\nCreate new cement market datasets\")\n\n else:\n print(\"\\nCreate new cement market datasets\")\n\n for i in (\n (\"market for cement, Portland\", \"cement, Portland\"),\n (\n \"market for cement, blast furnace slag 35-70%\",\n \"cement, blast furnace slag 35-70%\",\n ),\n (\n \"market for cement, blast furnace slag 6-34%\",\n \"cement, blast furnace slag 6-34%\",\n ),\n (\"market for cement, limestone 6-10%\", \"cement, limestone 6-10%\"),\n (\n \"market for cement, pozzolana and fly ash 15-50%\",\n \"cement, pozzolana and fly ash 15-50%\",\n ),\n (\n \"market for cement, pozzolana and fly ash 6-14%\",\n \"cement, pozzolana and fly ash 6-14%\",\n ),\n (\n \"market for cement, alternative constituents 6-20%\",\n \"cement, alternative constituents 6-20%\",\n ),\n (\n \"market for cement, alternative constituents 21-35%\",\n \"cement, alternative constituents 21-35%\",\n ),\n (\n \"market for cement, blast furnace slag 18-30% and 18-30% other alternative constituents\",\n \"cement, blast furnace slag 18-30% and 18-30% other alternative constituents\",\n ),\n (\n \"market for cement, blast furnace slag 31-50% and 31-50% other alternative constituents\",\n \"cement, blast furnace slag 31-50% and 31-50% other alternative constituents\",\n ),\n (\n \"market for cement, blast furnace slag 36-65%\",\n \"cement, blast furnace slag 36-65%\",\n ),\n (\n \"market for cement, blast furnace slag 66-80%\",\n \"cement, blast furnace slag, 66-80%\",\n ),\n (\n \"market for cement, blast furnace slag 81-95%\",\n \"cement, blast furnace slag 81-95%\",\n ),\n (\n \"market for cement, pozzolana and fly ash 11-35%\",\n \"cement, pozzolana and fly ash 11-35%\",\n ),\n (\n \"market for cement, pozzolana and fly ash 36-55%\",\n \"cement, pozzolana and fly ash 36-55%\",\n ),\n (\n \"market for cement, alternative constituents 45%\",\n \"cement, alternative constituents 45%\",\n ),\n (\n \"market for cement, blast furnace slag 40-70%\",\n \"cement, blast furnace 40-70%\",\n ),\n (\n \"market for cement, pozzolana and fly ash 25-35%\",\n \"cement, pozzolana and fly ash 25-35%\",\n ),\n (\"market for cement, limestone 21-35%\", \"cement, limestone 21-35%\"),\n (\n \"market for cement, blast furnace slag 21-35%\",\n \"cement, blast furnace slag 21-35%\",\n ),\n (\n \"market for cement, blast furnace slag 25-70%\",\n \"cement, blast furnace slag 25-70%\",\n ),\n (\n \"market for cement, blast furnace slag 5-25%\",\n \"cement, blast 
furnace slag 5-25%\",\n ),\n (\n \"market for cement, blast furnace slag 6-20%\",\n \"cement, blast furnace slag 6-20%\",\n ),\n (\n \"market for cement, blast furnace slag 70-100%\",\n \"cement, blast furnace slag 70-100%\",\n ),\n (\n \"market for cement, pozzolana and fly ash 15-40%\",\n \"cement, pozzolana and fly ash 15-40%\",\n ),\n (\n \"market for cement, pozzolana and fly ash 5-15%\",\n \"cement, pozzolana and fly ash 5-15%\",\n ),\n (\"market for cement, unspecified\", \"cement, unspecified\"),\n (\n \"market for cement, portland fly ash cement 21-35%\",\n \"cement, portland fly ash cement 21-35%\",\n ),\n (\n \"market for cement, portland fly ash cement 6-20%\",\n \"cement, portland fly ash cement 6-20%\",\n ),\n (\"market for cement mortar\", \"cement mortar\"),\n (\n \"market for cement, limestone cement 6-20%\",\n \"cement, limestone 6-20%\",\n ),\n\n ):\n act_cement = self.fetch_proxies(i[0], i[1])\n self.db.extend([v for v in act_cement.values()])\n\n created_datasets.extend(\n [\n (act[\"name\"], act[\"reference product\"], act[\"location\"])\n for act in act_cement.values()\n ]\n )\n\n self.relink_datasets(i[0], i[1])\n\n for i in (\n (\"cement production, Portland\", \"cement, Portland\"),\n (\n \"cement production, blast furnace slag 35-70%\",\n \"cement, blast furnace slag 35-70%\",\n ),\n (\n \"cement production, blast furnace slag 6-34%\",\n \"cement, blast furnace slag 6-34%\",\n ),\n (\"cement production, limestone 6-10%\", \"cement, limestone 6-10%\"),\n (\n \"cement production, pozzolana and fly ash 15-50%\",\n \"cement, pozzolana and fly ash 15-50%\",\n ),\n (\n \"cement production, pozzolana and fly ash 6-14%\",\n \"cement, pozzolana and fly ash 6-14%\",\n ),\n (\n \"cement production, alternative constituents 6-20%\",\n \"cement, alternative constituents 6-20%\",\n ),\n (\n \"cement production, alternative constituents 21-35%\",\n \"cement, alternative constituents 21-35%\",\n ),\n (\n \"cement production, blast furnace slag 18-30% and 18-30% other alternative constituents\",\n \"cement, blast furnace slag 18-30% and 18-30% other alternative constituents\",\n ),\n (\n \"cement production, blast furnace slag 31-50% and 31-50% other alternative constituents\",\n \"cement, blast furnace slag 31-50% and 31-50% other alternative constituents\",\n ),\n (\n \"cement production, blast furnace slag 36-65%\",\n \"cement, blast furnace slag 36-65%\",\n ),\n (\n \"cement production, blast furnace slag 66-80%\",\n \"cement, blast furnace slag, 66-80%\",\n ),\n (\n \"cement production, blast furnace slag 81-95%\",\n \"cement, blast furnace slag 81-95%\",\n ),\n (\n \"cement production, pozzolana and fly ash 11-35%\",\n \"cement, pozzolana and fly ash 11-35%\",\n ),\n (\n \"cement production, pozzolana and fly ash 36-55%\",\n \"cement, pozzolana and fly ash 36-55%\",\n ),\n (\n \"cement production, alternative constituents 45%\",\n \"cement, alternative constituents 45%\",\n ),\n (\n \"cement production, blast furnace slag 40-70%\",\n \"cement, blast furnace 40-70%\",\n ),\n (\n \"cement production, pozzolana and fly ash 25-35%\",\n \"cement, pozzolana and fly ash 25-35%\",\n ),\n (\"cement production, limestone 21-35%\", \"cement, limestone 21-35%\"),\n (\n \"cement production, blast furnace slag 21-35%\",\n \"cement, blast furnace slag 21-35%\",\n ),\n (\n \"cement production, blast furnace slag 25-70%\",\n \"cement, blast furnace slag 25-70%\",\n ),\n (\n \"cement production, blast furnace slag 5-25%\",\n \"cement, blast furnace slag 5-25%\",\n ),\n (\n \"cement 
production, blast furnace slag 6-20%\",\n \"cement, blast furnace slag 6-20%\",\n ),\n (\n \"cement production, blast furnace slag 70-100%\",\n \"cement, blast furnace slag 70-100%\",\n ),\n (\n \"cement production, pozzolana and fly ash 15-40%\",\n \"cement, pozzolana and fly ash 15-40%\",\n ),\n (\n \"cement production, pozzolana and fly ash 5-15%\",\n \"cement, pozzolana and fly ash 5-15%\",\n ),\n (\n \"cement production, blast furnace slag 31-50% and 31-50% other alternative constituents\",\n \"hard coal ash\",\n ),\n (\n \"cement production, pozzolana and fly ash 6-14%\",\n \"nickel smelter slag\",\n ),\n (\n \"cement production, fly ash 21-35%\",\n \"cement, portland fly ash cement 21-35%\",\n ),\n (\"cement production, fly ash 21-35%\", \"hard coal ash\"),\n (\"cement production, limestone 21-35%\", \"hard coal ash\"),\n (\"cement production, pozzolana and fly ash 6-14%\", \"hard coal ash\"),\n (\"cement production, alternative constituents 45%\", \"hard coal ash\"),\n (\"cement production, limestone 6-20%\", \"cement, limestone 6-20%\"),\n (\n \"cement production, pozzolana and fly ash 15-50%\",\n \"nickel smelter slag\",\n ),\n (\n \"cement production, blast furnace slag 18-30% and 18-30% other alternative constituents\",\n \"hard coal ash\",\n ),\n (\"cement production, alternative constituents 6-20%\", \"hard coal ash\"),\n\n (\"cement production, pozzolana and fly ash 36-55%\", \"hard coal ash\"),\n (\"cement production, pozzolana and fly ash 15-50%\", \"hard coal ash\"),\n (\n \"cement production, fly ash 6-20%\",\n \"cement, portland fly ash cement 6-20%\",\n ),\n (\"cement production, alternative constituents 21-35%\", \"hard coal ash\"),\n (\"cement production, pozzolana and fly ash 5-15%\", \"hard coal ash\"),\n (\"cement production, pozzolana and fly ash 11-35%\", \"hard coal ash\"),\n (\"cement production, pozzolana and fly ash 25-35%\", \"hard coal ash\"),\n (\"cement production, fly ash 6-20%\", \"hard coal ash\"),\n (\"cement production, pozzolana and fly ash 15-40%\", \"hard coal ash\"),\n ):\n act_cement = self.update_cement_production_datasets(i[0], i[1])\n self.db.extend([v for v in act_cement.values()])\n\n created_datasets.extend(\n [\n (act[\"name\"], act[\"reference product\"], act[\"location\"])\n for act in act_cement.values()\n ]\n )\n\n self.relink_datasets(i[0], i[1])\n\n if self.version == 3.5:\n name = \"market for cement, unspecified\"\n ref_prod = \"cement, unspecified\"\n\n else:\n name = \"cement, all types to generic market for cement, unspecified\"\n ref_prod = \"cement, unspecified\"\n self.relink_datasets(name, ref_prod)\n\n with open(\n DATA_DIR\n / \"logs/log created cement datasets {} {} {}-{}.csv\".format(\n self.model, self.scenario, self.year, date.today()\n ),\n \"a\",\n ) as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\", lineterminator=\"\\n\")\n for line in created_datasets:\n writer.writerow(line)\n\n print(\"Relink cement production datasets to new clinker market datasets\")\n self.relink_datasets(\"market for clinker\", \"clinker\")\n\n print(\"Relink clinker market datasets to new clinker production datasets\")\n self.relink_datasets(\"clinker production\", \"clinker\")\n\n return self.db\n"
] | [
[
"numpy.nanargmax",
"numpy.clip",
"numpy.isnan",
"numpy.nan_to_num",
"numpy.nanargmin",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jameshgrn/ESIP_DEV | [
"5f890daabee5013ae495ed2c5da3cc028a00d094"
] | [
"fill_reference_table.py"
] | [
"# %% Section: MetaInfo\n__author__ = ['John Franey', 'Jake Gearon']\n__credits__ = ['John Franey', 'Jake Gearon', 'Earth Science Information Partners (ESIP)']\n__version__ = '1.0.0'\n__maintainer__ = 'John Franey'\n__email__ = '[email protected]'\n__status__ = 'Development'\ndef replace_reference_id_table():\n \"\"\"\n Gathers all lake names from data sources and assigns a unique ID number\n _This should only be used on initial DB creation_\n otherwise use update_reference_id_table()\n :return: None\n \"\"\"\n\n import pandas as pd\n from sqlalchemy import create_engine\n from lake_table_usgs import update_usgs_lake_names\n import config\n\n username = config.username\n password = config.password\n\n confirmation = input('Are you sure you want to replace the entire database?\\nType \"yes\" to continue: ')\n if confirmation != 'yes':\n print('Confirmation not valid. No action taken')\n else:\n # Create database connection engines and cursor\n sql_engine = create_engine('mysql+pymysql://' + username + ':' + password +\n '@lake-test1.cevt7olsswvw.us-east-2.rds.amazonaws.com:3306/laketest').connect()\n\n # Get lake names from hydroweb, drop metadata, add source info\n print('--------Gathering Hydroweb lake information--------')\n hydroweb_url = 'http://hydroweb.theia-land.fr/hydroweb/authdownload?list=lakes&format=txt'\n hydroweb_df = pd.read_csv(hydroweb_url)\n hydroweb_df = hydroweb_df.rename(columns={'lake': 'lake_name'})\n hydroweb_df = hydroweb_df.filter(['lake_name'])\n hydroweb_df.insert(0, 'source', 'hydroweb')\n\n # Get lake names from usgs, drop metadata, add source info\n print('--------Gathering USGS lake information--------')\n usgs_source_df_raw = update_usgs_lake_names()\n\n usgs_df = usgs_source_df_raw.rename(columns={'station_nm': 'lake_name'})\n usgs_df = usgs_df.filter(['lake_name'])\n usgs_df.insert(0, 'source', 'usgs')\n\n # Get lake names from grealm, drop metadata, add source info\n print('--------Gathering G-Realm lake information--------')\n grealm_url = 'https://ipad.fas.usda.gov/lakes/images/LakesReservoirsCSV.txt'\n grealm_df = pd.read_csv(grealm_url, skiprows=3, sep=\"\\t\", header=0, parse_dates=[-1],\n infer_datetime_format=True, on_bad_lines='skip', skip_blank_lines=True)\n grealm_df = grealm_df[~grealm_df['Lake ID'].str.contains(\"Total\")]\n grealm_df = grealm_df.rename(columns={'Name': 'lake_name'})\n grealm_df.loc[grealm_df.lake_name.duplicated(keep=False), 'lake_name'] = \\\n grealm_df.loc[grealm_df.lake_name.duplicated(keep=False), 'lake_name'] + '_' + \\\n grealm_df.loc[grealm_df.lake_name.duplicated(keep=False), 'Resolution'].astype(str)\n grealm_df = grealm_df.filter(['lake_name'])\n grealm_df.insert(0, 'source', 'grealm')\n\n lake_reference_df = pd.concat([hydroweb_df, usgs_df, grealm_df], ignore_index=True)\n\n print('--------Overwriting database--------')\n lake_reference_df.to_sql('reference_ID', con=sql_engine, if_exists='replace', index_label='id_No',\n chunksize=100000)\n return lake_reference_df\n\n\ndef update_reference_id_table():\n \"\"\"\n Create DataFrame of lake names to pass to updater function\n :return: Pandas DataFrame of lakes to be updated\n \"\"\"\n import pandas as pd\n from sqlalchemy import create_engine\n from utiils import get_ref_table\n from lake_table_usgs import get_usgs_sites\n from lake_table_usgs import update_usgs_lake_names\n import pymysql\n import config\n\n username = config.username\n password = config.password\n\n # Create database connection\n connection = 
pymysql.connect(host='lake-test1.cevt7olsswvw.us-east-2.rds.amazonaws.com:3306/laketest',\n user=username,\n password=password,\n db='laketest')\n cursor = connection.cursor()\n\n reference_table = get_ref_table()\n reference_table = reference_table.drop('metadata', axis=1)\n print('Reference Table Read')\n\n grealm_url = 'https://ipad.fas.usda.gov/lakes/images/LakesReservoirsCSV.txt'\n grealm_source_df = pd.read_csv(grealm_url, skiprows=3, sep=\"\\t\", header=0, parse_dates=[-1],\n infer_datetime_format=True, on_bad_lines='skip', skip_blank_lines=True)\n grealm_source_df = grealm_source_df[~grealm_source_df['Lake ID'].str.contains(\"Total\")]\n grealm_source_df = grealm_source_df.rename(columns={'Name': 'lake_name'})\n grealm_source_df.loc[grealm_source_df.lake_name.duplicated(keep=False), 'lake_name'] = \\\n grealm_source_df.loc[grealm_source_df.lake_name.duplicated(keep=False), 'lake_name'] + '_' + \\\n grealm_source_df.loc[grealm_source_df.lake_name.duplicated(keep=False), 'Resolution'].astype(str)\n grealm_source_df = grealm_source_df.filter(['lake_name'])\n\n\n # Merge reference and grealm tables while keeping unique lake ID number from db, convert to json dict\n grealm_existing_lakes_table = reference_table.loc[reference_table['source'] == 'grealm']\n grealm_ready_df = grealm_source_df[~grealm_source_df.lake_name.isin(grealm_existing_lakes_table['lake_name'])]\n grealm_ready_df.insert(0, 'source', 'grealm')\n print(\"New GREALM-USDA Lakes ready for insertion\")\n\n # Grab hydroweb source data\n hydroweb_url = 'http://hydroweb.theia-land.fr/hydroweb/authdownload?list=lakes&format=txt'\n hydroweb_source_df = pd.read_csv(hydroweb_url)\n hydroweb_source_df = hydroweb_source_df.rename(columns={'lake': 'lake_name'})\n hydroweb_source_df = hydroweb_source_df.filter(['lake_name'])\n\n hydroweb_existing_lake_df = reference_table.loc[reference_table['source'] == 'hydroweb']\n hydroweb_ready_df = hydroweb_source_df[~hydroweb_source_df.lake_name.isin(hydroweb_existing_lake_df['lake_name'])]\n hydroweb_ready_df.insert(0, 'source', 'hydroweb')\n print(\"New HydroWeb Lakes ready for insertion\")\n\n # Grab usgs source data\n usgs_source_df_raw = update_usgs_lake_names()\n usgs_source_df = usgs_source_df_raw.rename(columns={'station_nm': 'lake_name'})\n usgs_source_df = usgs_source_df.filter(['lake_name'])\n usgs_existing_lake_df = reference_table.loc[reference_table['source'] == 'usgs']\n # TODO: Lake names are not copying over\n usgs_ready_df = usgs_source_df[~usgs_source_df.lake_name.isin(usgs_existing_lake_df['lake_name'])]\n usgs_ready_df.insert(0, 'source', 'usgs')\n print(\"New USGS-NWIS Lakes ready for insertion\")\n\n sql_ready_df = pd.concat([grealm_ready_df, hydroweb_ready_df, usgs_ready_df], ignore_index=True)\n\n sql_command = \"INSERT IGNORE INTO reference_ID(lake_name, `source`) VALUES (%s, %s);\"\n for name, source in zip(sql_ready_df['lake_name'], sql_ready_df['source']):\n cursor.execute(sql_command, (name, source))\n connection.commit()\n connection.close()\n return usgs_source_df_raw\n"
] | [
[
"pandas.concat",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
best-of-acrv/fcos | [
"47e5624973b256b8c74ce2c00fca50e62c19c66a"
] | [
"fcos/core/modeling/rpn/retinanet/inference.py"
] | [
"import torch\r\n\r\nfrom ..inference import RPNPostProcessor\r\nfrom ..utils import permute_and_flatten\r\nfrom ....modeling.box_coder import BoxCoder\r\nfrom ....structures.bounding_box import BoxList\r\nfrom ....structures.boxlist_ops import cat_boxlist\r\nfrom ....structures.boxlist_ops import boxlist_nms\r\nfrom ....structures.boxlist_ops import remove_small_boxes\r\n\r\n\r\nclass RetinaNetPostProcessor(RPNPostProcessor):\r\n \"\"\"\r\n Performs post-processing on the outputs of the RetinaNet boxes.\r\n This is only used in the testing.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n pre_nms_thresh,\r\n pre_nms_top_n,\r\n nms_thresh,\r\n fpn_post_nms_top_n,\r\n min_size,\r\n num_classes,\r\n box_coder=None,\r\n ):\r\n \"\"\"\r\n Arguments:\r\n pre_nms_thresh (float)\r\n pre_nms_top_n (int)\r\n nms_thresh (float)\r\n fpn_post_nms_top_n (int)\r\n min_size (int)\r\n num_classes (int)\r\n box_coder (BoxCoder)\r\n \"\"\"\r\n super(RetinaNetPostProcessor, self).__init__(pre_nms_thresh, 0,\r\n nms_thresh, min_size)\r\n self.pre_nms_thresh = pre_nms_thresh\r\n self.pre_nms_top_n = pre_nms_top_n\r\n self.nms_thresh = nms_thresh\r\n self.fpn_post_nms_top_n = fpn_post_nms_top_n\r\n self.min_size = min_size\r\n self.num_classes = num_classes\r\n\r\n if box_coder is None:\r\n box_coder = BoxCoder(weights=(10., 10., 5., 5.))\r\n self.box_coder = box_coder\r\n\r\n def add_gt_proposals(self, proposals, targets):\r\n \"\"\"\r\n This function is not used in RetinaNet\r\n \"\"\"\r\n pass\r\n\r\n def forward_for_single_feature_map(self, anchors, box_cls, box_regression):\r\n \"\"\"\r\n Arguments:\r\n anchors: list[BoxList]\r\n box_cls: tensor of size N, A * C, H, W\r\n box_regression: tensor of size N, A * 4, H, W\r\n \"\"\"\r\n device = box_cls.device\r\n N, _, H, W = box_cls.shape\r\n A = box_regression.size(1) // 4\r\n C = box_cls.size(1) // A\r\n\r\n # put in the same format as anchors\r\n box_cls = permute_and_flatten(box_cls, N, A, C, H, W)\r\n box_cls = box_cls.sigmoid()\r\n\r\n box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)\r\n box_regression = box_regression.reshape(N, -1, 4)\r\n\r\n num_anchors = A * H * W\r\n\r\n candidate_inds = box_cls > self.pre_nms_thresh\r\n\r\n pre_nms_top_n = candidate_inds.view(N, -1).sum(1)\r\n pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)\r\n\r\n results = []\r\n for per_box_cls, per_box_regression, per_pre_nms_top_n, \\\r\n per_candidate_inds, per_anchors in zip(\r\n box_cls,\r\n box_regression,\r\n pre_nms_top_n,\r\n candidate_inds,\r\n anchors):\r\n\r\n # Sort and select TopN\r\n # TODO most of this can be made out of the loop for\r\n # all images.\r\n # TODO:Yang: Not easy to do. Because the numbers of detections are\r\n # different in each image. 
Therefore, this part needs to be done\r\n # per image.\r\n per_box_cls = per_box_cls[per_candidate_inds]\r\n\r\n per_box_cls, top_k_indices = \\\r\n per_box_cls.topk(per_pre_nms_top_n, sorted=False)\r\n\r\n per_candidate_nonzeros = \\\r\n per_candidate_inds.nonzero()[top_k_indices, :]\r\n\r\n per_box_loc = per_candidate_nonzeros[:, 0]\r\n per_class = per_candidate_nonzeros[:, 1]\r\n per_class += 1\r\n\r\n detections = self.box_coder.decode(\r\n per_box_regression[per_box_loc, :].view(-1, 4),\r\n per_anchors.bbox[per_box_loc, :].view(-1, 4))\r\n\r\n boxlist = BoxList(detections, per_anchors.size, mode=\"xyxy\")\r\n boxlist.add_field(\"labels\", per_class)\r\n boxlist.add_field(\"scores\", per_box_cls)\r\n boxlist = boxlist.clip_to_image(remove_empty=False)\r\n boxlist = remove_small_boxes(boxlist, self.min_size)\r\n results.append(boxlist)\r\n\r\n return results\r\n\r\n # TODO very similar to filter_results from PostProcessor\r\n # but filter_results is per image\r\n # TODO Yang: solve this issue in the future. No good solution\r\n # right now.\r\n def select_over_all_levels(self, boxlists):\r\n num_images = len(boxlists)\r\n results = []\r\n for i in range(num_images):\r\n scores = boxlists[i].get_field(\"scores\")\r\n labels = boxlists[i].get_field(\"labels\")\r\n boxes = boxlists[i].bbox\r\n boxlist = boxlists[i]\r\n result = []\r\n # skip the background\r\n for j in range(1, self.num_classes):\r\n inds = (labels == j).nonzero().view(-1)\r\n\r\n scores_j = scores[inds]\r\n boxes_j = boxes[inds, :].view(-1, 4)\r\n boxlist_for_class = BoxList(boxes_j, boxlist.size, mode=\"xyxy\")\r\n boxlist_for_class.add_field(\"scores\", scores_j)\r\n boxlist_for_class = boxlist_nms(boxlist_for_class,\r\n self.nms_thresh,\r\n score_field=\"scores\")\r\n num_labels = len(boxlist_for_class)\r\n boxlist_for_class.add_field(\r\n \"labels\",\r\n torch.full((num_labels,),\r\n j,\r\n dtype=torch.int64,\r\n device=scores.device))\r\n result.append(boxlist_for_class)\r\n\r\n result = cat_boxlist(result)\r\n number_of_detections = len(result)\r\n\r\n # Limit to max_per_image detections **over all classes**\r\n if number_of_detections > self.fpn_post_nms_top_n > 0:\r\n cls_scores = result.get_field(\"scores\")\r\n image_thresh, _ = torch.kthvalue(\r\n cls_scores.cpu(),\r\n number_of_detections - self.fpn_post_nms_top_n + 1)\r\n keep = cls_scores >= image_thresh.item()\r\n keep = torch.nonzero(keep).squeeze(1)\r\n result = result[keep]\r\n results.append(result)\r\n return results\r\n\r\n\r\ndef make_retinanet_postprocessor(config, rpn_box_coder, is_train):\r\n pre_nms_thresh = config.MODEL.RETINANET.INFERENCE_TH\r\n pre_nms_top_n = config.MODEL.RETINANET.PRE_NMS_TOP_N\r\n nms_thresh = config.MODEL.RETINANET.NMS_TH\r\n fpn_post_nms_top_n = config.TEST.DETECTIONS_PER_IMG\r\n min_size = 0\r\n\r\n box_selector = RetinaNetPostProcessor(\r\n pre_nms_thresh=pre_nms_thresh,\r\n pre_nms_top_n=pre_nms_top_n,\r\n nms_thresh=nms_thresh,\r\n fpn_post_nms_top_n=fpn_post_nms_top_n,\r\n min_size=min_size,\r\n num_classes=config.MODEL.RETINANET.NUM_CLASSES,\r\n box_coder=rpn_box_coder,\r\n )\r\n\r\n return box_selector\r\n"
] | [
[
"torch.nonzero",
"torch.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
timoteogb/finn-hlslib | [
"6efd5dee886ba7cc542ab69ae3c8b09d4a1ed1af"
] | [
"tb/gen_params_stmr.py"
] | [
"# Copyright (c) 2021, Xilinx, Inc.\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without \n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, \n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright \n# notice, this list of conditions and the following disclaimer in the \n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its \n# contributors may be used to endorse or promote products derived from \n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, \n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR \n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR \n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, \n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, \n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR \n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF \n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n\n################################################################################\n#\n# Authors: Timoteo Garcia Bertoa <[email protected]> \n#\n# \\file gen_weights_stmr.py\n#\n# This file checks PE/OFM channels mapping for TMR implementation, and \n# generates weights with/without error injection for triplicated channels as\n# part of the OFM, generating config.h and memdata.h files to use in the TMR\n# testbench.\n#\n################################################################################\n\nimport numpy as np\nimport os\nimport sys\nimport random \nimport subprocess\nimport tmrmapping as TMR\n\n#random.seed(1)\n#np.random.seed(1)\n\n# Define parameters\nkernel_dim = 2 \nstride = 1\ninput_precision = 8\nifm_channels = 2\nofm_channels = 8\nifm_dimension = 3\nofm_dimension = 2\nactivation_precision = 16\nexpand = 1\nsimd = 2\npe = 6\nw_precision = 8\nmmv = 1\nredf = 3\nnum_red = 2\nmax_ch_width = 6\n\n# Define how many errors to generate in the triplications (if injection is used)\nnum_trip_inj = 2 # Set this value to any number between 1 and number of triplications (num_red)\nnum_errors_pe = 2 # Set this value to any number between 1 and redundancy factor (redf)\n\n# Check validity of the PE selection\nTMR.isvalid(pe, ofm_channels, redf, num_red)\n\n# Initialise arrays\nofm_ch_red = ofm_channels + (num_red * (redf - 1))\ntile = ifm_channels *kernel_dim*kernel_dim * ofm_channels // (simd * ofm_channels) #assuming pe=ofm_channels to generate weights\ntile_tri = tile * int(ofm_ch_red/pe) #tile when OFM contains triplications\nw_t_values = np.zeros((ofm_ch_red, tile)) # weights including triplications\nw_values = np.zeros((ofm_channels, tile)) # original weights\nwidth = simd * w_precision\n\n# Generate the set of weights\nfor p in range(ofm_channels):\n for t in range(tile): \n w_values[p][t] = random.randint(0, 1 << width-1) \n \n# Channels to triplicate\nch_list = [1, 3]\n\n# Retrieve triplicated weights, channel_mask and 
red_ch_index\nw_t_values, channel_mask, red_ch_index = TMR.map(w_values, ofm_channels, pe, redf, num_red, ch_list)\n\n# Transform channel mask to binary\ntobin = '{0:0' + str(ofm_ch_red) + 'b}'\nchannel_mask = tobin.format(channel_mask)\n \n# Check if user gave argument\nif((len(sys.argv) == 1) or (len(sys.argv) > 2)):\n \n print(\"Error: Please, provide a valid, single argument, it should be either 'inj', 'no_inj' or 'tmrcheck'.\")\n exit(1) \n \n# User wants to inject errors\nelif(str(sys.argv[1]) == 'inj'):\n memdataname = \"memdata_inj.h\"\n configname = \"config_inj.h\"\n injecting = 'true'\n print(\"Injecting errors among the triplicated channels...\")\n \n # Separate weights in case there is folding, to inject errors to individual blocks:\n w_t_values = np.concatenate(np.hsplit(w_t_values, ofm_ch_red/pe), axis=0)\n # With the current values, a single error is being injected to 2 triplets (there is chance to be the same triplet twice)\n for j in range (num_trip_inj):\n which_triplet = random.randint(0, num_red-1)\n temp_array = [red_ch_index[which_triplet][0], red_ch_index[which_triplet][0]+1, red_ch_index[which_triplet][0]+2]\n for i in range (num_errors_pe):\n which_triplication = int(np.random.choice(temp_array)) \n w_t_values[which_triplication][random.randint(0, tile-1)] = random.randint(0, 1<<width-1) \n # Put weights back to the previous position \n w_t_values = np.concatenate(np.array_split(w_t_values, ofm_ch_red/pe), axis=1) \n skip = 0\n \n# User doesn't want to inject errors \nelif(str(sys.argv[1]) == 'no_inj'): \n memdataname = \"memdata_noinj.h\"\n configname = \"config_noinj.h\"\n injecting = 'false'\n print(\"Error injection has been skipped.\") \n skip = 0\n \nelif(str(sys.argv[1]) == 'tmrcheck'): \n memdataname = \"memdata_tmrc.h\"\n configname = \"config_tmrc.h\"\n skip = 1\nelse: \n print(\"Error: Please, provide a valid argument, it should be either 'inj', 'no_inj' or 'tmrcheck'.\")\n exit(1) \n \n# Write config.h output file\noutFileConfig = open(configname , \"wt\")\noutFileConfig.write(\"#define KERNEL_DIM %d \\n\" % kernel_dim)\noutFileConfig.write(\"#define SIMD1 %d \\n\" % simd)\noutFileConfig.write(\"#define PE1 %d \\n\" % pe)\noutFileConfig.write(\"#define MMV1 %d \\n\" % mmv)\noutFileConfig.write(\"#define WIDTH %d \\n\" % w_precision)\noutFileConfig.write(\"#define IFM_Channels1 %d \\n\" % ifm_channels)\noutFileConfig.write(\"#define OFM_Channels1 %d // OFM_Channels %d + redundancies \\n\" % (ofm_ch_red, ofm_channels))\noutFileConfig.write(\"#define IFMDim1 %d \\n\" % ifm_dimension)\noutFileConfig.write(\"#define OFMDim1 %d \\n\" % ofm_dimension)\noutFileConfig.write(\"#define STRIDE %d \\n\" % stride)\noutFileConfig.write(\"#define INPUT_PRECISION %d \\n\" % input_precision)\noutFileConfig.write(\"#define TILE1 %d \\n\" % tile_tri)\noutFileConfig.write(\"#define ACTIVATION_PRECISION %d \\n\" % activation_precision)\noutFileConfig.write(\"#define REDF %d \\n\" % redf)\noutFileConfig.write(\"#define NUM_RED %d \\n\" % num_red)\noutFileConfig.write(\"#define MAX_CH_WIDTH %d // 2^6 = 64 channel indexes \\n\" % max_ch_width)\nif skip == 0:\n outFileConfig.write(\"#define INJ %s // Have errors been injected? 
\\n\" % injecting)\noutFileConfig.close()\n\n# Write memdata.h output file\noutFileWeights = open(memdataname , \"wt\")\noutFileWeights.write(\"#ifndef PARAMS_HPP\\n\")\noutFileWeights.write(\"#define PARAMS_HPP\\n\")\noutFileWeights.write(\"namespace PARAM{ \\n\")\n\nif skip == 0:\n if (w_precision == 1):\n outFileWeights.write(\"static BinaryWeights<%d,%d,%d> weights= {\\n{\\n\" %(simd,pe,tile * int(ofm_ch_red/pe)))\n else:\n outFileWeights.write(\"static FixedPointWeights<%d,ap_int<%d>,%d,%d> weights= {\\n{\\n\" %(simd,w_precision,pe,tile_tri)) \n \n for p in range(pe):\n outFileWeights.write(\"{ \\n\")\n for t in range(tile * int(ofm_ch_red/pe)):\n outFileWeights.write(\"%s\" % hex(int(w_t_values[p][t])))\n if t!=tile * int(ofm_ch_red/pe)-1:\n outFileWeights.write(\",\\n\")\n outFileWeights.write(\"} \\n\")\n if p!=pe-1:\n outFileWeights.write(\",\")\n outFileWeights.write(\"}\\n};\\n\\n\")\n \noutFileWeights.write(\"static const ap_uint<%d> channel_mask = 0b%s;\\n\\n\" %(ofm_ch_red, channel_mask))\noutFileWeights.write(\"static ap_uint<%d> red_ch_index[%d] = {\" %(max_ch_width, num_red))\n\nfor t in range(num_red):\n outFileWeights.write(\"%s\" % int(red_ch_index[t][0]))\n if t != num_red-1:\n outFileWeights.write(\",\")\noutFileWeights.write(\"};\") \n \noutFileWeights.write(\"\\n\\n}\\n\\n\") \noutFileWeights.write(\"#endif \\n\")\noutFileWeights.close()\n\nprint(\"Done! 'config.h' and 'memdata.h' generated.\")\n"
] | [
[
"numpy.array_split",
"numpy.zeros",
"numpy.hsplit",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
markneville/CSI-Net | [
"9c78e46d9e2ff9d5813862e83879f6d88ea56e4a"
] | [
"model/resnet_upsample.py"
] | [
"import scipy.io as sio\r\nfrom torch.utils.data import TensorDataset, DataLoader\r\nimport numpy as np\r\n# from model import locNN\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport torch.nn.functional as F\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\n\r\n# 3x3 Convolution\r\ndef conv3x3(in_channels, out_channels, stride=1):\r\n return nn.Conv2d(in_channels, out_channels, kernel_size=3,\r\n stride=stride, padding=1, bias=False)\r\n\r\n# Residual Block\r\nclass ResidualBlock(nn.Module):\r\n expansion = 1\r\n def __init__(self, in_channels, out_channels, stride=1, downsample=None):\r\n super(ResidualBlock, self).__init__()\r\n self.conv1 = conv3x3(in_channels, out_channels, stride)\r\n self.bn1 = nn.BatchNorm2d(out_channels)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.conv2 = conv3x3(out_channels, out_channels)\r\n self.bn2 = nn.BatchNorm2d(out_channels)\r\n self.downsample = downsample\r\n\r\n def forward(self, x):\r\n residual = x\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n if self.downsample:\r\n residual = self.downsample(x)\r\n out += residual\r\n out = self.relu(out)\r\n return out\r\n\r\nclass Bottleneck(nn.Module):\r\n expansion = 4\r\n\r\n def __init__(self, in_channels, out_channels, stride=1, downsample=None):\r\n super(Bottleneck, self).__init__()\r\n self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(out_channels)\r\n self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride,\r\n padding=1, bias=False)\r\n self.bn2 = nn.BatchNorm2d(out_channels)\r\n self.conv3 = nn.Conv2d(out_channels, out_channels * 4, kernel_size=1, bias=False)\r\n self.bn3 = nn.BatchNorm2d(out_channels * 4)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.downsample = downsample\r\n self.stride = stride\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv3(out)\r\n out = self.bn3(out)\r\n\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n out += residual\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\n\r\n\r\n# ResNet Module\r\nclass ResNet(nn.Module):\r\n def __init__(self, block, layers, num_classes):\r\n super(ResNet, self).__init__()\r\n\r\n self.upsampling = nn.Upsample(mode='bilinear', scale_factor=224, align_corners=False)\r\n\r\n\r\n self.conv1 = nn.Sequential(\r\n nn.Conv2d(30, 64, kernel_size=7, stride=2, padding=3, bias=False),\r\n nn.BatchNorm2d(64),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1),\r\n )\r\n\r\n self.in_channels = 64\r\n self.layer1 = self.make_layer(block, 64, layers[0]) #8*64*64 -> 16*32*32\r\n self.layer2 = self.make_layer(block, 128, layers[1], 2) #16*32*32 -> 32*16*16\r\n self.layer3 = self.make_layer(block, 256, layers[2], 2) #32*16*16 -> 64*8*8\r\n self.layer4 = self.make_layer(block, 512, layers[3], 2) #64*8*8 -> 128*4*4\r\n\r\n self.humanid = nn.Sequential(\r\n nn.Conv2d(512 * block.expansion, 512 * block.expansion, kernel_size=3, stride=2, padding=1, bias=False),\r\n nn.BatchNorm2d(512 * block.expansion),\r\n nn.ReLU(),\r\n\r\n nn.Conv2d(512 * block.expansion, 256 * block.expansion, kernel_size=4, stride=2, padding=1, bias=False),\r\n nn.BatchNorm2d(256 * block.expansion),\r\n nn.ReLU(),\r\n nn.AvgPool2d(2),\r\n )\r\n\r\n 
self.fcc = nn.Linear(256 * block.expansion, num_classes)\r\n\r\n self.biometrics = nn.Sequential(\r\n nn.Conv2d(512 * block.expansion, 512 * block.expansion, kernel_size=3, stride=2, padding=1, bias=False),\r\n nn.BatchNorm2d(512 * block.expansion),\r\n nn.ReLU(),\r\n\r\n nn.Conv2d(512 * block.expansion, 256 * block.expansion, kernel_size=4, stride=2, padding=1, bias=False),\r\n nn.BatchNorm2d(256 * block.expansion),\r\n nn.ReLU(),\r\n nn.AvgPool2d(2),\r\n )\r\n\r\n self.fcr = nn.Linear(256 * block.expansion, 4)\r\n\r\n def make_layer(self, block, out_channels, blocks, stride=1):\r\n downsample = None\r\n if (stride != 1) or (self.in_channels != out_channels * block.expansion):\r\n downsample = nn.Sequential(\r\n conv3x3(self.in_channels, out_channels * block.expansion, stride=stride),\r\n nn.BatchNorm2d(out_channels * block.expansion))\r\n layers = []\r\n layers.append(block(self.in_channels, out_channels, stride, downsample))\r\n self.in_channels = out_channels * block.expansion\r\n for i in range(1, blocks):\r\n layers.append(block(self.in_channels, out_channels))\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n out = self.upsampling(x)\r\n out = self.conv1(out)\r\n #\r\n out = self.layer1(out)\r\n out = self.layer2(out)\r\n out = self.layer3(out)\r\n out = self.layer4(out)\r\n #\r\n # #\r\n # out = self.avgpool7(out)\r\n out1 = self.humanid(out)\r\n out1 = out1.squeeze()\r\n out1 = self.fcc(out1)\r\n # # # #\r\n # #\r\n out2 = self.biometrics(out)\r\n out2 = out2.squeeze()\r\n out2 = F.relu(self.fcr(out2))\r\n\r\n return out1, out2"
] | [
[
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.Upsample",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tyomitch/nltk | [
"943b7bb3181118710ea4f22e0b63ce25adfffa08"
] | [
"nltk/cluster/util.py"
] | [
"# Natural Language Toolkit: Clusterer Utilities\n#\n# Copyright (C) 2001-2022 NLTK Project\n# Author: Trevor Cohn <[email protected]>\n# Contributor: J Richard Snape\n# URL: <https://www.nltk.org/>\n# For license information, see LICENSE.TXT\nimport copy\nfrom abc import abstractmethod\nfrom math import sqrt\nfrom sys import stdout\n\ntry:\n import numpy\nexcept ImportError:\n pass\n\nfrom nltk.cluster.api import ClusterI\n\n\nclass VectorSpaceClusterer(ClusterI):\n \"\"\"\n Abstract clusterer which takes tokens and maps them into a vector space.\n Optionally performs singular value decomposition to reduce the\n dimensionality.\n \"\"\"\n\n def __init__(self, normalise=False, svd_dimensions=None):\n \"\"\"\n :param normalise: should vectors be normalised to length 1\n :type normalise: boolean\n :param svd_dimensions: number of dimensions to use in reducing vector\n dimensionsionality with SVD\n :type svd_dimensions: int\n \"\"\"\n self._Tt = None\n self._should_normalise = normalise\n self._svd_dimensions = svd_dimensions\n\n def cluster(self, vectors, assign_clusters=False, trace=False):\n assert len(vectors) > 0\n\n # normalise the vectors\n if self._should_normalise:\n vectors = list(map(self._normalise, vectors))\n\n # use SVD to reduce the dimensionality\n if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):\n [u, d, vt] = numpy.linalg.svd(numpy.transpose(numpy.array(vectors)))\n S = d[: self._svd_dimensions] * numpy.identity(\n self._svd_dimensions, numpy.float64\n )\n T = u[:, : self._svd_dimensions]\n Dt = vt[: self._svd_dimensions, :]\n vectors = numpy.transpose(numpy.dot(S, Dt))\n self._Tt = numpy.transpose(T)\n\n # call abstract method to cluster the vectors\n self.cluster_vectorspace(vectors, trace)\n\n # assign the vectors to clusters\n if assign_clusters:\n return [self.classify(vector) for vector in vectors]\n\n @abstractmethod\n def cluster_vectorspace(self, vectors, trace):\n \"\"\"\n Finds the clusters using the given set of vectors.\n \"\"\"\n\n def classify(self, vector):\n if self._should_normalise:\n vector = self._normalise(vector)\n if self._Tt is not None:\n vector = numpy.dot(self._Tt, vector)\n cluster = self.classify_vectorspace(vector)\n return self.cluster_name(cluster)\n\n @abstractmethod\n def classify_vectorspace(self, vector):\n \"\"\"\n Returns the index of the appropriate cluster for the vector.\n \"\"\"\n\n def likelihood(self, vector, label):\n if self._should_normalise:\n vector = self._normalise(vector)\n if self._Tt is not None:\n vector = numpy.dot(self._Tt, vector)\n return self.likelihood_vectorspace(vector, label)\n\n def likelihood_vectorspace(self, vector, cluster):\n \"\"\"\n Returns the likelihood of the vector belonging to the cluster.\n \"\"\"\n predicted = self.classify_vectorspace(vector)\n return 1.0 if cluster == predicted else 0.0\n\n def vector(self, vector):\n \"\"\"\n Returns the vector after normalisation and dimensionality reduction\n \"\"\"\n if self._should_normalise:\n vector = self._normalise(vector)\n if self._Tt is not None:\n vector = numpy.dot(self._Tt, vector)\n return vector\n\n def _normalise(self, vector):\n \"\"\"\n Normalises the vector to unit length.\n \"\"\"\n return vector / sqrt(numpy.dot(vector, vector))\n\n\ndef euclidean_distance(u, v):\n \"\"\"\n Returns the euclidean distance between vectors u and v. 
This is equivalent\n to the length of the vector (u - v).\n \"\"\"\n diff = u - v\n return sqrt(numpy.dot(diff, diff))\n\n\ndef cosine_distance(u, v):\n \"\"\"\n Returns 1 minus the cosine of the angle between vectors v and u. This is\n equal to ``1 - (u.v / |u||v|)``.\n \"\"\"\n return 1 - (numpy.dot(u, v) / (sqrt(numpy.dot(u, u)) * sqrt(numpy.dot(v, v))))\n\n\nclass _DendrogramNode:\n \"\"\"Tree node of a dendrogram.\"\"\"\n\n def __init__(self, value, *children):\n self._value = value\n self._children = children\n\n def leaves(self, values=True):\n if self._children:\n leaves = []\n for child in self._children:\n leaves.extend(child.leaves(values))\n return leaves\n elif values:\n return [self._value]\n else:\n return [self]\n\n def groups(self, n):\n queue = [(self._value, self)]\n\n while len(queue) < n:\n priority, node = queue.pop()\n if not node._children:\n queue.push((priority, node))\n break\n for child in node._children:\n if child._children:\n queue.append((child._value, child))\n else:\n queue.append((0, child))\n # makes the earliest merges at the start, latest at the end\n queue.sort()\n\n groups = []\n for priority, node in queue:\n groups.append(node.leaves())\n return groups\n\n def __lt__(self, comparator):\n return cosine_distance(self._value, comparator._value) < 0\n\n\nclass Dendrogram:\n \"\"\"\n Represents a dendrogram, a tree with a specified branching order. This\n must be initialised with the leaf items, then iteratively call merge for\n each branch. This class constructs a tree representing the order of calls\n to the merge function.\n \"\"\"\n\n def __init__(self, items=[]):\n \"\"\"\n :param items: the items at the leaves of the dendrogram\n :type items: sequence of (any)\n \"\"\"\n self._items = [_DendrogramNode(item) for item in items]\n self._original_items = copy.copy(self._items)\n self._merge = 1\n\n def merge(self, *indices):\n \"\"\"\n Merges nodes at given indices in the dendrogram. The nodes will be\n combined which then replaces the first node specified. 
All other nodes\n involved in the merge will be removed.\n\n :param indices: indices of the items to merge (at least two)\n :type indices: seq of int\n \"\"\"\n assert len(indices) >= 2\n node = _DendrogramNode(self._merge, *(self._items[i] for i in indices))\n self._merge += 1\n self._items[indices[0]] = node\n for i in indices[1:]:\n del self._items[i]\n\n def groups(self, n):\n \"\"\"\n Finds the n-groups of items (leaves) reachable from a cut at depth n.\n :param n: number of groups\n :type n: int\n \"\"\"\n if len(self._items) > 1:\n root = _DendrogramNode(self._merge, *self._items)\n else:\n root = self._items[0]\n return root.groups(n)\n\n def show(self, leaf_labels=[]):\n \"\"\"\n Print the dendrogram in ASCII art to standard out.\n\n :param leaf_labels: an optional list of strings to use for labeling the\n leaves\n :type leaf_labels: list\n \"\"\"\n\n # ASCII rendering characters\n JOIN, HLINK, VLINK = \"+\", \"-\", \"|\"\n\n # find the root (or create one)\n if len(self._items) > 1:\n root = _DendrogramNode(self._merge, *self._items)\n else:\n root = self._items[0]\n leaves = self._original_items\n\n if leaf_labels:\n last_row = leaf_labels\n else:\n last_row = [\"%s\" % leaf._value for leaf in leaves]\n\n # find the bottom row and the best cell width\n width = max(map(len, last_row)) + 1\n lhalf = width // 2\n rhalf = int(width - lhalf - 1)\n\n # display functions\n def format(centre, left=\" \", right=\" \"):\n return f\"{lhalf * left}{centre}{right * rhalf}\"\n\n def display(str):\n stdout.write(str)\n\n # for each merge, top down\n queue = [(root._value, root)]\n verticals = [format(\" \") for leaf in leaves]\n while queue:\n priority, node = queue.pop()\n child_left_leaf = list(map(lambda c: c.leaves(False)[0], node._children))\n indices = list(map(leaves.index, child_left_leaf))\n if child_left_leaf:\n min_idx = min(indices)\n max_idx = max(indices)\n for i in range(len(leaves)):\n if leaves[i] in child_left_leaf:\n if i == min_idx:\n display(format(JOIN, \" \", HLINK))\n elif i == max_idx:\n display(format(JOIN, HLINK, \" \"))\n else:\n display(format(JOIN, HLINK, HLINK))\n verticals[i] = format(VLINK)\n elif min_idx <= i <= max_idx:\n display(format(HLINK, HLINK, HLINK))\n else:\n display(verticals[i])\n display(\"\\n\")\n for child in node._children:\n if child._children:\n queue.append((child._value, child))\n queue.sort()\n\n for vertical in verticals:\n display(vertical)\n display(\"\\n\")\n\n # finally, display the last line\n display(\"\".join(item.center(width) for item in last_row))\n display(\"\\n\")\n\n def __repr__(self):\n if len(self._items) > 1:\n root = _DendrogramNode(self._merge, *self._items)\n else:\n root = self._items[0]\n leaves = root.leaves(False)\n return \"<Dendrogram with %d leaves>\" % len(leaves)\n"
] | [
[
"numpy.dot",
"numpy.array",
"numpy.identity",
"numpy.transpose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kunde122/ERNIE1 | [
"050327e968b2d7d9090ab882a5dd6b0fdeca80b4"
] | [
"ernie/finetune/sequence_label.py"
] | [
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\n\n\nimport os\nimport time\nimport argparse\nimport numpy as np\nimport multiprocessing\n\nimport paddle\nimport logging\nimport paddle.fluid as fluid\n\nfrom six.moves import xrange\n\nfrom model.ernie import ErnieModel\n\nlog = logging.getLogger(__name__)\n\ndef create_model(args, pyreader_name, ernie_config, is_prediction=False):\n src_ids = fluid.layers.data(name='1', shape=[-1, args.max_seq_len, 1], dtype='int64')\n sent_ids = fluid.layers.data(name='2', shape=[-1, args.max_seq_len, 1], dtype='int64')\n pos_ids = fluid.layers.data(name='3', shape=[-1, args.max_seq_len, 1], dtype='int64')\n task_ids = fluid.layers.data(name='4', shape=[-1, args.max_seq_len, 1], dtype='int64')\n input_mask = fluid.layers.data(name='5', shape=[-1, args.max_seq_len, 1], dtype='float32')\n labels = fluid.layers.data(name='7', shape=[-1, args.max_seq_len, 1], dtype='int64')\n seq_lens = fluid.layers.data(name='8', shape=[-1], dtype='int64')\n\n pyreader = fluid.io.DataLoader.from_generator(feed_list=[src_ids, sent_ids, pos_ids, task_ids, input_mask, labels, seq_lens], \n capacity=70,\n iterable=False)\n\n ernie = ErnieModel(\n src_ids=src_ids,\n position_ids=pos_ids,\n sentence_ids=sent_ids,\n task_ids=task_ids,\n input_mask=input_mask,\n config=ernie_config,\n use_fp16=args.use_fp16)\n\n enc_out = ernie.get_sequence_output()\n enc_out = fluid.layers.dropout(\n x=enc_out, dropout_prob=0.1, dropout_implementation=\"upscale_in_train\")\n logits = fluid.layers.fc(\n input=enc_out,\n size=args.num_labels,\n num_flatten_dims=2,\n param_attr=fluid.ParamAttr(\n name=\"cls_seq_label_out_w\",\n initializer=fluid.initializer.TruncatedNormal(scale=0.02)),\n bias_attr=fluid.ParamAttr(\n name=\"cls_seq_label_out_b\",\n initializer=fluid.initializer.Constant(0.)))\n infers = fluid.layers.argmax(logits, axis=2)\n\n ret_infers = fluid.layers.reshape(x=infers, shape=[-1, 1])\n lod_labels = fluid.layers.sequence_unpad(labels, seq_lens)\n lod_infers = fluid.layers.sequence_unpad(infers, seq_lens)\n\n (_, _, _, num_infer, num_label, num_correct) = fluid.layers.chunk_eval(\n input=lod_infers,\n label=lod_labels,\n chunk_scheme=args.chunk_scheme,\n num_chunk_types=((args.num_labels-1)//(len(args.chunk_scheme)-1)))\n\n labels = fluid.layers.flatten(labels, axis=2)\n ce_loss, probs = fluid.layers.softmax_with_cross_entropy(\n logits=fluid.layers.flatten(\n logits, axis=2),\n label=labels,\n return_softmax=True)\n input_mask = fluid.layers.flatten(input_mask, axis=2)\n ce_loss = ce_loss * input_mask\n loss = fluid.layers.mean(x=ce_loss)\n\n graph_vars = {\n \"inputs\": src_ids,\n \"loss\": loss,\n \"probs\": probs,\n \"seqlen\": seq_lens,\n \"num_infer\": num_infer,\n \"num_label\": num_label,\n \"num_correct\": num_correct,\n 
}\n\n for k, v in graph_vars.items():\n v.persistable = True\n\n return pyreader, graph_vars\n\n\ndef calculate_f1(num_label, num_infer, num_correct):\n if num_infer == 0:\n precision = 0.0\n else:\n precision = num_correct * 1.0 / num_infer\n\n if num_label == 0:\n recall = 0.0\n else:\n recall = num_correct * 1.0 / num_label\n\n if num_correct == 0:\n f1 = 0.0\n else:\n f1 = 2 * precision * recall / (precision + recall)\n return precision, recall, f1\n\n\ndef evaluate(exe,\n program,\n pyreader,\n graph_vars,\n tag_num,\n dev_count=1):\n fetch_list = [\n graph_vars[\"num_infer\"].name, graph_vars[\"num_label\"].name,\n graph_vars[\"num_correct\"].name\n ]\n\n total_label, total_infer, total_correct = 0.0, 0.0, 0.0\n time_begin = time.time()\n pyreader.start()\n while True:\n try:\n np_num_infer, np_num_label, np_num_correct = exe.run(program=program,\n fetch_list=fetch_list)\n total_infer += np.sum(np_num_infer)\n total_label += np.sum(np_num_label)\n total_correct += np.sum(np_num_correct)\n\n except fluid.core.EOFException:\n pyreader.reset()\n break\n\n precision, recall, f1 = calculate_f1(total_label, total_infer,\n total_correct)\n time_end = time.time()\n return \\\n \"[evaluation] f1: %f, precision: %f, recall: %f, elapsed time: %f s\" \\\n % (f1, precision, recall, time_end - time_begin)\n\n\ndef chunk_predict(np_inputs, np_probs, np_lens, dev_count=1):\n inputs = np_inputs.reshape([-1]).astype(np.int32)\n probs = np_probs.reshape([-1, np_probs.shape[-1]])\n\n all_lens = np_lens.reshape([dev_count, -1]).astype(np.int32).tolist()\n\n base_index = 0\n out = []\n for dev_index in xrange(dev_count):\n lens = all_lens[dev_index]\n max_len = 0\n for l in lens:\n max_len = max(max_len, l)\n\n for i in xrange(len(lens)):\n seq_st = base_index + i * max_len + 1\n seq_en = seq_st + (lens[i] - 2)\n prob = probs[seq_st:seq_en, :]\n infers = np.argmax(prob, -1)\n out.append((\n inputs[seq_st:seq_en].tolist(), \n infers.tolist(),\n prob.tolist()))\n base_index += max_len * len(lens)\n return out\n\n\ndef predict(exe,\n test_program,\n test_pyreader,\n graph_vars,\n dev_count=1):\n fetch_list = [\n graph_vars[\"inputs\"].name,\n graph_vars[\"probs\"].name,\n graph_vars[\"seqlen\"].name,\n ]\n\n test_pyreader.start()\n res = []\n while True:\n try:\n inputs, probs, np_lens = exe.run(program=test_program,\n fetch_list=fetch_list)\n r = chunk_predict(inputs, probs, np_lens, dev_count)\n res += r\n except fluid.core.EOFException:\n test_pyreader.reset()\n break\n return res\n\n"
] | [
[
"numpy.argmax",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
josephcslater/python-control | [
"e080cef44e718c7c0e3a286fcc3adae405936b14"
] | [
"control/tests/statesp_test.py"
] | [
"\"\"\"statesp_test.py - test state space class\n\nRMM, 30 Mar 2011 based on TestStateSp from v0.4a)\nRMM, 14 Jun 2019 statesp_array_test.py coverted from statesp_test.py to test\n with use_numpy_matrix(False)\nBG, 26 Jul 2020 merge statesp_array_test.py differences into statesp_test.py\n convert to pytest\n\"\"\"\n\nimport numpy as np\nimport pytest\nimport operator\nfrom numpy.linalg import solve\nfrom scipy.linalg import block_diag, eigvals\n\nimport control as ct\nfrom control.config import defaults\nfrom control.dtime import sample_system\nfrom control.lti import evalfr\nfrom control.statesp import (StateSpace, _convert_to_statespace, drss,\n rss, ss, tf2ss, _statesp_defaults)\nfrom control.tests.conftest import ismatarrayout, slycotonly\nfrom control.xferfcn import TransferFunction, ss2tf\n\nfrom .conftest import editsdefaults\n\nclass TestStateSpace:\n \"\"\"Tests for the StateSpace class.\"\"\"\n\n @pytest.fixture\n def sys322ABCD(self):\n \"\"\"Matrices for sys322\"\"\"\n A322 = [[-3., 4., 2.],\n [-1., -3., 0.],\n [2., 5., 3.]]\n B322 = [[1., 4.],\n [-3., -3.],\n [-2., 1.]]\n C322 = [[4., 2., -3.],\n [1., 4., 3.]]\n D322 = [[-2., 4.],\n [0., 1.]]\n return (A322, B322, C322, D322)\n\n @pytest.fixture\n def sys322(self, sys322ABCD):\n \"\"\"3-states square system (2 inputs x 2 outputs)\"\"\"\n return StateSpace(*sys322ABCD)\n\n @pytest.fixture\n def sys222(self):\n \"\"\"2-states square system (2 inputs x 2 outputs)\"\"\"\n A222 = [[4., 1.],\n [2., -3]]\n B222 = [[5., 2.],\n [-3., -3.]]\n C222 = [[2., -4],\n [0., 1.]]\n D222 = [[3., 2.],\n [1., -1.]]\n return StateSpace(A222, B222, C222, D222)\n\n @pytest.fixture\n def sys623(self):\n \"\"\"sys3: 6 states non square system (2 inputs x 3 outputs)\"\"\"\n A623 = np.array([[1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 3, 0, 0, 0],\n [0, 0, 0, -4, 0, 0],\n [0, 0, 0, 0, -1, 0],\n [0, 0, 0, 0, 0, 3]])\n B623 = np.array([[0, -1],\n [-1, 0],\n [1, -1],\n [0, 0],\n [0, 1],\n [-1, -1]])\n C623 = np.array([[1, 0, 0, 1, 0, 0],\n [0, 1, 0, 1, 0, 1],\n [0, 0, 1, 0, 0, 1]])\n D623 = np.zeros((3, 2))\n return StateSpace(A623, B623, C623, D623)\n\n @pytest.mark.parametrize(\n \"dt\",\n [(), (None, ), (0, ), (1, ), (0.1, ), (True, )],\n ids=lambda i: \"dt \" + (\"unspec\" if len(i) == 0 else str(i[0])))\n @pytest.mark.parametrize(\n \"argfun\",\n [pytest.param(\n lambda ABCDdt: (ABCDdt, {}),\n id=\"A, B, C, D[, dt]\"),\n pytest.param(\n lambda ABCDdt: (ABCDdt[:4], {'dt': dt_ for dt_ in ABCDdt[4:]}),\n id=\"A, B, C, D[, dt=dt]\"),\n pytest.param(\n lambda ABCDdt: ((StateSpace(*ABCDdt), ), {}),\n id=\"sys\")\n ])\n def test_constructor(self, sys322ABCD, dt, argfun):\n \"\"\"Test different ways to call the StateSpace() constructor\"\"\"\n args, kwargs = argfun(sys322ABCD + dt)\n sys = StateSpace(*args, **kwargs)\n\n dtref = defaults['control.default_dt'] if len(dt) == 0 else dt[0]\n np.testing.assert_almost_equal(sys.A, sys322ABCD[0])\n np.testing.assert_almost_equal(sys.B, sys322ABCD[1])\n np.testing.assert_almost_equal(sys.C, sys322ABCD[2])\n np.testing.assert_almost_equal(sys.D, sys322ABCD[3])\n assert sys.dt == dtref\n\n @pytest.mark.parametrize(\"args, exc, errmsg\",\n [((True, ), TypeError,\n \"(can only take in|sys must be) a StateSpace\"),\n ((1, 2), ValueError, \"1, 4, or 5 arguments\"),\n ((np.ones((3, 2)), np.ones((3, 2)),\n np.ones((2, 2)), np.ones((2, 2))),\n ValueError, \"A must be square\"),\n ((np.ones((3, 3)), np.ones((2, 2)),\n np.ones((2, 3)), np.ones((2, 2))),\n ValueError, \"A and B\"),\n ((np.ones((3, 3)), np.ones((3, 2)),\n 
np.ones((2, 2)), np.ones((2, 2))),\n ValueError, \"A and C\"),\n ((np.ones((3, 3)), np.ones((3, 2)),\n np.ones((2, 3)), np.ones((2, 3))),\n ValueError, \"B and D\"),\n ((np.ones((3, 3)), np.ones((3, 2)),\n np.ones((2, 3)), np.ones((3, 2))),\n ValueError, \"C and D\"),\n ])\n def test_constructor_invalid(self, args, exc, errmsg):\n \"\"\"Test invalid input to StateSpace() constructor\"\"\"\n with pytest.raises(exc, match=errmsg):\n StateSpace(*args)\n with pytest.raises(exc, match=errmsg):\n ss(*args)\n\n def test_constructor_warns(self, sys322ABCD):\n \"\"\"Test ambiguos input to StateSpace() constructor\"\"\"\n with pytest.warns(UserWarning, match=\"received multiple dt\"):\n sys = StateSpace(*(sys322ABCD + (0.1, )), dt=0.2)\n np.testing.assert_almost_equal(sys.A, sys322ABCD[0])\n np.testing.assert_almost_equal(sys.B, sys322ABCD[1])\n np.testing.assert_almost_equal(sys.C, sys322ABCD[2])\n np.testing.assert_almost_equal(sys.D, sys322ABCD[3])\n assert sys.dt == 0.1\n\n def test_copy_constructor(self):\n \"\"\"Test the copy constructor\"\"\"\n # Create a set of matrices for a simple linear system\n A = np.array([[-1]])\n B = np.array([[1]])\n C = np.array([[1]])\n D = np.array([[0]])\n\n # Create the first linear system and a copy\n linsys = StateSpace(A, B, C, D)\n cpysys = StateSpace(linsys)\n\n # Change the original A matrix\n A[0, 0] = -2\n np.testing.assert_array_equal(linsys.A, [[-1]]) # original value\n np.testing.assert_array_equal(cpysys.A, [[-1]]) # original value\n\n # Change the A matrix for the original system\n linsys.A[0, 0] = -3\n np.testing.assert_array_equal(cpysys.A, [[-1]]) # original value\n\n def test_copy_constructor_nodt(self, sys322):\n \"\"\"Test the copy constructor when an object without dt is passed\"\"\"\n sysin = sample_system(sys322, 1.)\n del sysin.dt\n sys = StateSpace(sysin)\n assert sys.dt == defaults['control.default_dt']\n\n # test for static gain\n sysin = StateSpace([], [], [], [[1, 2], [3, 4]], 1.)\n del sysin.dt\n sys = StateSpace(sysin)\n assert sys.dt is None\n\n def test_matlab_style_constructor(self):\n \"\"\"Use (deprecated) matrix-style construction string\"\"\"\n with pytest.deprecated_call():\n sys = StateSpace(\"-1 1; 0 2\", \"0; 1\", \"1, 0\", \"0\")\n assert sys.A.shape == (2, 2)\n assert sys.B.shape == (2, 1)\n assert sys.C.shape == (1, 2)\n assert sys.D.shape == (1, 1)\n for X in [sys.A, sys.B, sys.C, sys.D]:\n assert ismatarrayout(X)\n\n def test_D_broadcast(self, sys623):\n \"\"\"Test broadcast of D=0 to the right shape\"\"\"\n # Giving D as a scalar 0 should broadcast to the right shape\n sys = StateSpace(sys623.A, sys623.B, sys623.C, 0)\n np.testing.assert_array_equal(sys623.D, sys.D)\n\n # Giving D as a matrix of the wrong size should generate an error\n with pytest.raises(ValueError):\n sys = StateSpace(sys.A, sys.B, sys.C, np.array([[0]]))\n\n # Make sure that empty systems still work\n sys = StateSpace([], [], [], 1)\n np.testing.assert_array_equal(sys.D, [[1]])\n\n sys = StateSpace([], [], [], [[0]])\n np.testing.assert_array_equal(sys.D, [[0]])\n\n sys = StateSpace([], [], [], [0])\n np.testing.assert_array_equal(sys.D, [[0]])\n\n sys = StateSpace([], [], [], 0)\n np.testing.assert_array_equal(sys.D, [[0]])\n\n def test_pole(self, sys322):\n \"\"\"Evaluate the poles of a MIMO system.\"\"\"\n\n p = np.sort(sys322.pole())\n true_p = np.sort([3.34747678408874,\n -3.17373839204437 + 1.47492908003839j,\n -3.17373839204437 - 1.47492908003839j])\n\n np.testing.assert_array_almost_equal(p, true_p)\n\n def test_zero_empty(self):\n 
\"\"\"Test to make sure zero() works with no zeros in system.\"\"\"\n sys = _convert_to_statespace(TransferFunction([1], [1, 2, 1]))\n np.testing.assert_array_equal(sys.zero(), np.array([]))\n\n @slycotonly\n def test_zero_siso(self, sys222):\n \"\"\"Evaluate the zeros of a SISO system.\"\"\"\n # extract only first input / first output system of sys222. This system is denoted sys111\n # or tf111\n tf111 = ss2tf(sys222)\n sys111 = tf2ss(tf111[0, 0])\n\n # compute zeros as root of the characteristic polynomial at the numerator of tf111\n # this method is simple and assumed as valid in this test\n true_z = np.sort(tf111[0, 0].zero())\n # Compute the zeros through ab08nd, which is tested here\n z = np.sort(sys111.zero())\n\n np.testing.assert_almost_equal(true_z, z)\n\n @slycotonly\n def test_zero_mimo_sys322_square(self, sys322):\n \"\"\"Evaluate the zeros of a square MIMO system.\"\"\"\n\n z = np.sort(sys322.zero())\n true_z = np.sort([44.41465, -0.490252, -5.924398])\n np.testing.assert_array_almost_equal(z, true_z)\n\n @slycotonly\n def test_zero_mimo_sys222_square(self, sys222):\n \"\"\"Evaluate the zeros of a square MIMO system.\"\"\"\n\n z = np.sort(sys222.zero())\n true_z = np.sort([-10.568501, 3.368501])\n np.testing.assert_array_almost_equal(z, true_z)\n\n @slycotonly\n def test_zero_mimo_sys623_non_square(self, sys623):\n \"\"\"Evaluate the zeros of a non square MIMO system.\"\"\"\n\n z = np.sort(sys623.zero())\n true_z = np.sort([2., -1.])\n np.testing.assert_array_almost_equal(z, true_z)\n\n def test_add_ss(self, sys222, sys322):\n \"\"\"Add two MIMO systems.\"\"\"\n\n A = [[-3., 4., 2., 0., 0.], [-1., -3., 0., 0., 0.],\n [2., 5., 3., 0., 0.], [0., 0., 0., 4., 1.], [0., 0., 0., 2., -3.]]\n B = [[1., 4.], [-3., -3.], [-2., 1.], [5., 2.], [-3., -3.]]\n C = [[4., 2., -3., 2., -4.], [1., 4., 3., 0., 1.]]\n D = [[1., 6.], [1., 0.]]\n\n sys = sys322 + sys222\n\n np.testing.assert_array_almost_equal(sys.A, A)\n np.testing.assert_array_almost_equal(sys.B, B)\n np.testing.assert_array_almost_equal(sys.C, C)\n np.testing.assert_array_almost_equal(sys.D, D)\n\n def test_subtract_ss(self, sys222, sys322):\n \"\"\"Subtract two MIMO systems.\"\"\"\n\n A = [[-3., 4., 2., 0., 0.], [-1., -3., 0., 0., 0.],\n [2., 5., 3., 0., 0.], [0., 0., 0., 4., 1.], [0., 0., 0., 2., -3.]]\n B = [[1., 4.], [-3., -3.], [-2., 1.], [5., 2.], [-3., -3.]]\n C = [[4., 2., -3., -2., 4.], [1., 4., 3., 0., -1.]]\n D = [[-5., 2.], [-1., 2.]]\n\n sys = sys322 - sys222\n\n np.testing.assert_array_almost_equal(sys.A, A)\n np.testing.assert_array_almost_equal(sys.B, B)\n np.testing.assert_array_almost_equal(sys.C, C)\n np.testing.assert_array_almost_equal(sys.D, D)\n\n def test_multiply_ss(self, sys222, sys322):\n \"\"\"Multiply two MIMO systems.\"\"\"\n\n A = [[4., 1., 0., 0., 0.], [2., -3., 0., 0., 0.], [2., 0., -3., 4., 2.],\n [-6., 9., -1., -3., 0.], [-4., 9., 2., 5., 3.]]\n B = [[5., 2.], [-3., -3.], [7., -2.], [-12., -3.], [-5., -5.]]\n C = [[-4., 12., 4., 2., -3.], [0., 1., 1., 4., 3.]]\n D = [[-2., -8.], [1., -1.]]\n\n sys = sys322 * sys222\n\n np.testing.assert_array_almost_equal(sys.A, A)\n np.testing.assert_array_almost_equal(sys.B, B)\n np.testing.assert_array_almost_equal(sys.C, C)\n np.testing.assert_array_almost_equal(sys.D, D)\n\n @pytest.mark.parametrize(\"omega, resp\",\n [(1.,\n np.array([[ 4.37636761e-05-0.01522976j,\n -7.92603939e-01+0.02617068j],\n [-3.31544858e-01+0.0576105j,\n 1.28919037e-01-0.14382495j]])),\n (32,\n np.array([[-1.16548243e-05-3.13444825e-04j,\n -7.99936828e-01+4.54201816e-06j],\n 
[-3.00137118e-01+3.42881660e-03j,\n 6.32015038e-04-1.21462255e-02j]]))])\n @pytest.mark.parametrize(\"dt\", [None, 0, 1e-3])\n def test_call(self, dt, omega, resp):\n \"\"\"Evaluate the frequency response at single frequencies\"\"\"\n A = [[-2, 0.5], [0.5, -0.3]]\n B = [[0.3, -1.3], [0.1, 0.]]\n C = [[0., 0.1], [-0.3, -0.2]]\n D = [[0., -0.8], [-0.3, 0.]]\n sys = StateSpace(A, B, C, D)\n\n if dt:\n sys = sample_system(sys, dt)\n s = np.exp(omega * 1j * dt)\n else:\n s = omega * 1j\n\n # Correct versions of the call\n np.testing.assert_allclose(evalfr(sys, s), resp, atol=1e-3)\n np.testing.assert_allclose(sys(s), resp, atol=1e-3)\n\n # Deprecated name of the call (should generate error)\n with pytest.raises(AttributeError):\n sys.evalfr(omega)\n\n\n @slycotonly\n def test_freq_resp(self):\n \"\"\"Evaluate the frequency response at multiple frequencies.\"\"\"\n\n A = [[-2, 0.5], [0.5, -0.3]]\n B = [[0.3, -1.3], [0.1, 0.]]\n C = [[0., 0.1], [-0.3, -0.2]]\n D = [[0., -0.8], [-0.3, 0.]]\n sys = StateSpace(A, B, C, D)\n\n true_mag = [[[0.0852992637230322, 0.00103596611395218],\n [0.935374692849736, 0.799380720864549]],\n [[0.55656854563842, 0.301542699860857],\n [0.609178071542849, 0.0382108097985257]]]\n true_phase = [[[-0.566195599644593, -1.68063565332582],\n [3.0465958317514, 3.14141384339534]],\n [[2.90457947657161, 3.10601268291914],\n [-0.438157380501337, -1.40720969147217]]]\n true_omega = [0.1, 10.]\n\n mag, phase, omega = sys.frequency_response(true_omega)\n\n np.testing.assert_almost_equal(mag, true_mag)\n np.testing.assert_almost_equal(phase, true_phase)\n np.testing.assert_equal(omega, true_omega)\n\n # Deprecated version of the call (should return warning)\n with pytest.warns(DeprecationWarning, match=\"will be removed\"):\n mag, phase, omega = sys.freqresp(true_omega)\n np.testing.assert_almost_equal(mag, true_mag)\n\n def test__isstatic(self):\n A0 = np.zeros((2,2))\n A1 = A0.copy()\n A1[0,1] = 1.1\n B0 = np.zeros((2,1))\n B1 = B0.copy()\n B1[0,0] = 1.3\n C0 = A0\n C1 = np.eye(2)\n D0 = 0\n D1 = np.ones((2,1))\n assert StateSpace(A0, B0, C1, D1)._isstatic()\n assert not StateSpace(A1, B0, C1, D1)._isstatic()\n assert not StateSpace(A0, B1, C1, D1)._isstatic()\n assert not StateSpace(A1, B1, C1, D1)._isstatic()\n assert StateSpace(A0, B0, C0, D0)._isstatic()\n assert StateSpace(A0, B0, C0, D1)._isstatic()\n assert StateSpace(A0, B0, C1, D0)._isstatic()\n\n @slycotonly\n def test_minreal(self):\n \"\"\"Test a minreal model reduction.\"\"\"\n # A = [-2, 0.5, 0; 0.5, -0.3, 0; 0, 0, -0.1]\n A = [[-2, 0.5, 0], [0.5, -0.3, 0], [0, 0, -0.1]]\n # B = [0.3, -1.3; 0.1, 0; 1, 0]\n B = [[0.3, -1.3], [0.1, 0.], [1.0, 0.0]]\n # C = [0, 0.1, 0; -0.3, -0.2, 0]\n C = [[0., 0.1, 0.0], [-0.3, -0.2, 0.0]]\n # D = [0 -0.8; -0.3 0]\n D = [[0., -0.8], [-0.3, 0.]]\n # sys = ss(A, B, C, D)\n\n sys = StateSpace(A, B, C, D)\n sysr = sys.minreal()\n assert sysr.nstates == 2\n assert sysr.ninputs == sys.ninputs\n assert sysr.noutputs == sys.noutputs\n np.testing.assert_array_almost_equal(\n eigvals(sysr.A), [-2.136154, -0.1638459])\n\n def test_append_ss(self):\n \"\"\"Test appending two state-space systems.\"\"\"\n A1 = [[-2, 0.5, 0], [0.5, -0.3, 0], [0, 0, -0.1]]\n B1 = [[0.3, -1.3], [0.1, 0.], [1.0, 0.0]]\n C1 = [[0., 0.1, 0.0], [-0.3, -0.2, 0.0]]\n D1 = [[0., -0.8], [-0.3, 0.]]\n A2 = [[-1.]]\n B2 = [[1.2]]\n C2 = [[0.5]]\n D2 = [[0.4]]\n A3 = [[-2, 0.5, 0, 0], [0.5, -0.3, 0, 0], [0, 0, -0.1, 0],\n [0, 0, 0., -1.]]\n B3 = [[0.3, -1.3, 0], [0.1, 0., 0], [1.0, 0.0, 0], [0., 0, 1.2]]\n C3 = [[0., 0.1, 0.0, 
0.0], [-0.3, -0.2, 0.0, 0.0], [0., 0., 0., 0.5]]\n D3 = [[0., -0.8, 0.], [-0.3, 0., 0.], [0., 0., 0.4]]\n sys1 = StateSpace(A1, B1, C1, D1)\n sys2 = StateSpace(A2, B2, C2, D2)\n sys3 = StateSpace(A3, B3, C3, D3)\n sys3c = sys1.append(sys2)\n np.testing.assert_array_almost_equal(sys3.A, sys3c.A)\n np.testing.assert_array_almost_equal(sys3.B, sys3c.B)\n np.testing.assert_array_almost_equal(sys3.C, sys3c.C)\n np.testing.assert_array_almost_equal(sys3.D, sys3c.D)\n\n def test_append_tf(self):\n \"\"\"Test appending a state-space system with a tf\"\"\"\n A1 = [[-2, 0.5, 0], [0.5, -0.3, 0], [0, 0, -0.1]]\n B1 = [[0.3, -1.3], [0.1, 0.], [1.0, 0.0]]\n C1 = [[0., 0.1, 0.0], [-0.3, -0.2, 0.0]]\n D1 = [[0., -0.8], [-0.3, 0.]]\n s = TransferFunction([1, 0], [1])\n h = 1 / (s + 1) / (s + 2)\n sys1 = StateSpace(A1, B1, C1, D1)\n sys2 = _convert_to_statespace(h)\n sys3c = sys1.append(sys2)\n np.testing.assert_array_almost_equal(sys1.A, sys3c.A[:3, :3])\n np.testing.assert_array_almost_equal(sys1.B, sys3c.B[:3, :2])\n np.testing.assert_array_almost_equal(sys1.C, sys3c.C[:2, :3])\n np.testing.assert_array_almost_equal(sys1.D, sys3c.D[:2, :2])\n np.testing.assert_array_almost_equal(sys2.A, sys3c.A[3:, 3:])\n np.testing.assert_array_almost_equal(sys2.B, sys3c.B[3:, 2:])\n np.testing.assert_array_almost_equal(sys2.C, sys3c.C[2:, 3:])\n np.testing.assert_array_almost_equal(sys2.D, sys3c.D[2:, 2:])\n np.testing.assert_array_almost_equal(sys3c.A[:3, 3:], np.zeros((3, 2)))\n np.testing.assert_array_almost_equal(sys3c.A[3:, :3], np.zeros((2, 3)))\n\n def test_array_access_ss(self):\n\n sys1 = StateSpace([[1., 2.], [3., 4.]],\n [[5., 6.], [6., 8.]],\n [[9., 10.], [11., 12.]],\n [[13., 14.], [15., 16.]], 1)\n\n sys1_11 = sys1[0, 1]\n np.testing.assert_array_almost_equal(sys1_11.A,\n sys1.A)\n np.testing.assert_array_almost_equal(sys1_11.B,\n sys1.B[:, 1:2])\n np.testing.assert_array_almost_equal(sys1_11.C,\n sys1.C[0:1, :])\n np.testing.assert_array_almost_equal(sys1_11.D,\n sys1.D[0, 1])\n\n assert sys1.dt == sys1_11.dt\n\n def test_dc_gain_cont(self):\n \"\"\"Test DC gain for continuous-time state-space systems.\"\"\"\n sys = StateSpace(-2., 6., 5., 0)\n np.testing.assert_allclose(sys.dcgain(), 15.)\n\n sys2 = StateSpace(-2, [6., 4.], [[5.], [7.], [11]], np.zeros((3, 2)))\n expected = np.array([[15., 10.], [21., 14.], [33., 22.]])\n np.testing.assert_allclose(sys2.dcgain(), expected)\n\n sys3 = StateSpace(0., 1., 1., 0.)\n np.testing.assert_equal(sys3.dcgain(), np.nan)\n\n def test_dc_gain_discr(self):\n \"\"\"Test DC gain for discrete-time state-space systems.\"\"\"\n # static gain\n sys = StateSpace([], [], [], 2, True)\n np.testing.assert_equal(sys.dcgain(), 2)\n\n # averaging filter\n sys = StateSpace(0.5, 0.5, 1, 0, True)\n np.testing.assert_allclose(sys.dcgain(), 1)\n\n # differencer\n sys = StateSpace(0, 1, -1, 1, True)\n np.testing.assert_equal(sys.dcgain(), 0)\n\n # summer\n sys = StateSpace(1, 1, 1, 0, True)\n np.testing.assert_equal(sys.dcgain(), np.nan)\n\n @pytest.mark.parametrize(\"outputs\", range(1, 6))\n @pytest.mark.parametrize(\"inputs\", range(1, 6))\n @pytest.mark.parametrize(\"dt\", [None, 0, 1, True],\n ids=[\"dtNone\", \"c\", \"dt1\", \"dtTrue\"])\n def test_dc_gain_integrator(self, outputs, inputs, dt):\n \"\"\"DC gain when eigenvalue at DC returns appropriately sized array of nan.\n\n the SISO case is also tested in test_dc_gain_{cont,discr}\n time systems (dt=0)\n \"\"\"\n states = max(inputs, outputs)\n\n # a matrix that is singular at DC, and has no \"useless\" states as in\n # 
_remove_useless_states\n a = np.triu(np.tile(2, (states, states)))\n # eigenvalues all +2, except for ...\n a[0, 0] = 0 if dt in [0, None] else 1\n b = np.eye(max(inputs, states))[:states, :inputs]\n c = np.eye(max(outputs, states))[:outputs, :states]\n d = np.zeros((outputs, inputs))\n sys = StateSpace(a, b, c, d, dt)\n dc = np.squeeze(np.full_like(d, np.nan))\n np.testing.assert_array_equal(dc, sys.dcgain())\n\n def test_scalar_static_gain(self):\n \"\"\"Regression: can we create a scalar static gain?\n\n make sure StateSpace internals, specifically ABC matrix\n sizes, are OK for LTI operations\n \"\"\"\n g1 = StateSpace([], [], [], [2])\n g2 = StateSpace([], [], [], [3])\n\n g3 = g1 * g2\n assert 6 == g3.D[0, 0]\n g4 = g1 + g2\n assert 5 == g4.D[0, 0]\n g5 = g1.feedback(g2)\n np.testing.assert_allclose(2. / 7, g5.D[0, 0])\n g6 = g1.append(g2)\n np.testing.assert_allclose(np.diag([2, 3]), g6.D)\n\n def test_matrix_static_gain(self):\n \"\"\"Regression: can we create matrix static gains?\"\"\"\n d1 = np.array([[1, 2, 3], [4, 5, 6]])\n d2 = np.array([[7, 8], [9, 10], [11, 12]])\n g1 = StateSpace([], [], [], d1)\n\n # _remove_useless_states was making A = [[0]]\n assert (0, 0) == g1.A.shape\n\n g2 = StateSpace([], [], [], d2)\n g3 = StateSpace([], [], [], d2.T)\n\n h1 = g1 * g2\n np.testing.assert_array_equal(np.dot(d1, d2), h1.D)\n h2 = g1 + g3\n np.testing.assert_array_equal(d1 + d2.T, h2.D)\n h3 = g1.feedback(g2)\n np.testing.assert_array_almost_equal(\n solve(np.eye(2) + np.dot(d1, d2), d1), h3.D)\n h4 = g1.append(g2)\n np.testing.assert_array_equal(block_diag(d1, d2), h4.D)\n\n def test_remove_useless_states(self):\n \"\"\"Regression: _remove_useless_states gives correct ABC sizes.\"\"\"\n g1 = StateSpace(np.zeros((3, 3)), np.zeros((3, 4)),\n np.zeros((5, 3)), np.zeros((5, 4)),\n remove_useless_states=True)\n assert (0, 0) == g1.A.shape\n assert (0, 4) == g1.B.shape\n assert (5, 0) == g1.C.shape\n assert (5, 4) == g1.D.shape\n assert 0 == g1.nstates\n\n @pytest.mark.parametrize(\"A, B, C, D\",\n [([1], [], [], [1]),\n ([1], [1], [], [1]),\n ([1], [], [1], [1]),\n ([], [1], [], [1]),\n ([], [1], [1], [1]),\n ([], [], [1], [1]),\n ([1], [1], [1], [])])\n def test_bad_empty_matrices(self, A, B, C, D):\n \"\"\"Mismatched ABCD matrices when some are empty.\"\"\"\n with pytest.raises(ValueError):\n StateSpace(A, B, C, D)\n\n\n def test_minreal_static_gain(self):\n \"\"\"Regression: minreal on static gain was failing.\"\"\"\n g1 = StateSpace([], [], [], [1])\n g2 = g1.minreal()\n np.testing.assert_array_equal(g1.A, g2.A)\n np.testing.assert_array_equal(g1.B, g2.B)\n np.testing.assert_array_equal(g1.C, g2.C)\n np.testing.assert_array_equal(g1.D, g2.D)\n\n def test_empty(self):\n \"\"\"Regression: can we create an empty StateSpace object?\"\"\"\n g1 = StateSpace([], [], [], [])\n assert 0 == g1.nstates\n assert 0 == g1.ninputs\n assert 0 == g1.noutputs\n\n def test_matrix_to_state_space(self):\n \"\"\"_convert_to_statespace(matrix) gives ss([],[],[],D)\"\"\"\n with pytest.deprecated_call():\n D = np.matrix([[1, 2, 3], [4, 5, 6]])\n g = _convert_to_statespace(D)\n\n np.testing.assert_array_equal(np.empty((0, 0)), g.A)\n np.testing.assert_array_equal(np.empty((0, D.shape[1])), g.B)\n np.testing.assert_array_equal(np.empty((D.shape[0], 0)), g.C)\n np.testing.assert_array_equal(D, g.D)\n\n def test_lft(self):\n \"\"\" test lft function with result obtained from matlab implementation\"\"\"\n # test case\n A = [[1, 2, 3],\n [1, 4, 5],\n [2, 3, 4]]\n B = [[0, 2],\n [5, 6],\n [5, 2]]\n C = [[1, 4, 
5],\n [2, 3, 0]]\n D = [[0, 0],\n [3, 0]]\n P = StateSpace(A, B, C, D)\n Ak = [[0, 2, 3],\n [2, 3, 5],\n [2, 1, 9]]\n Bk = [[1, 1],\n [2, 3],\n [9, 4]]\n Ck = [[1, 4, 5],\n [2, 3, 6]]\n Dk = [[0, 2],\n [0, 0]]\n K = StateSpace(Ak, Bk, Ck, Dk)\n\n # case 1\n pk = P.lft(K, 2, 1)\n Amatlab = [1, 2, 3, 4, 6, 12, 1, 4, 5, 17, 38, 61, 2, 3, 4, 9, 26, 37,\n 2, 3, 0, 3, 14, 18, 4, 6, 0, 8, 27, 35, 18, 27, 0, 29, 109,\n 144]\n Bmatlab = [0, 10, 10, 7, 15, 58]\n Cmatlab = [1, 4, 5, 0, 0, 0]\n Dmatlab = [0]\n np.testing.assert_allclose(np.array(pk.A).reshape(-1), Amatlab)\n np.testing.assert_allclose(np.array(pk.B).reshape(-1), Bmatlab)\n np.testing.assert_allclose(np.array(pk.C).reshape(-1), Cmatlab)\n np.testing.assert_allclose(np.array(pk.D).reshape(-1), Dmatlab)\n\n # case 2\n pk = P.lft(K)\n Amatlab = [1, 2, 3, 4, 6, 12, -3, -2, 5, 11, 14, 31, -2, -3, 4, 3, 2,\n 7, 0.6, 3.4, 5, -0.6, -0.4, 0, 0.8, 6.2, 10, 0.2, -4.2,\n -4, 7.4, 33.6, 45, -0.4, -8.6, -3]\n Bmatlab = []\n Cmatlab = []\n Dmatlab = []\n np.testing.assert_allclose(np.array(pk.A).reshape(-1), Amatlab)\n np.testing.assert_allclose(np.array(pk.B).reshape(-1), Bmatlab)\n np.testing.assert_allclose(np.array(pk.C).reshape(-1), Cmatlab)\n np.testing.assert_allclose(np.array(pk.D).reshape(-1), Dmatlab)\n\n def test_repr(self, sys322):\n \"\"\"Test string representation\"\"\"\n ref322 = \"\\n\".join([\"StateSpace(array([[-3., 4., 2.],\",\n \" [-1., -3., 0.],\",\n \" [ 2., 5., 3.]]), array([[ 1., 4.],\",\n \" [-3., -3.],\",\n \" [-2., 1.]]), array([[ 4., 2., -3.],\",\n \" [ 1., 4., 3.]]), array([[-2., 4.],\",\n \" [ 0., 1.]]){dt})\"])\n assert repr(sys322) == ref322.format(dt='')\n sysd = StateSpace(sys322.A, sys322.B,\n sys322.C, sys322.D, 0.4)\n assert repr(sysd), ref322.format(dt=\" == 0.4\")\n array = np.array # noqa\n sysd2 = eval(repr(sysd))\n np.testing.assert_allclose(sysd.A, sysd2.A)\n np.testing.assert_allclose(sysd.B, sysd2.B)\n np.testing.assert_allclose(sysd.C, sysd2.C)\n np.testing.assert_allclose(sysd.D, sysd2.D)\n\n def test_str(self, sys322):\n \"\"\"Test that printing the system works\"\"\"\n tsys = sys322\n tref = (\"A = [[-3. 4. 2.]\\n\"\n \" [-1. -3. 0.]\\n\"\n \" [ 2. 5. 3.]]\\n\"\n \"\\n\"\n \"B = [[ 1. 4.]\\n\"\n \" [-3. -3.]\\n\"\n \" [-2. 1.]]\\n\"\n \"\\n\"\n \"C = [[ 4. 2. -3.]\\n\"\n \" [ 1. 4. 3.]]\\n\"\n \"\\n\"\n \"D = [[-2. 4.]\\n\"\n \" [ 0. 1.]]\\n\")\n assert str(tsys) == tref\n tsysdtunspec = StateSpace(tsys.A, tsys.B, tsys.C, tsys.D, True)\n assert str(tsysdtunspec) == tref + \"\\ndt unspecified\\n\"\n sysdt1 = StateSpace(tsys.A, tsys.B, tsys.C, tsys.D, 1.)\n assert str(sysdt1) == tref + \"\\ndt = 1.0\\n\"\n\n def test_pole_static(self):\n \"\"\"Regression: pole() of static gain is empty array.\"\"\"\n np.testing.assert_array_equal(np.array([]),\n StateSpace([], [], [], [[1]]).pole())\n\n def test_horner(self, sys322):\n \"\"\"Test horner() function\"\"\"\n # Make sure we can compute the transfer function at a complex value\n sys322.horner(1. 
+ 1.j)\n\n # Make sure result agrees with frequency response\n mag, phase, omega = sys322.frequency_response([1])\n np.testing.assert_array_almost_equal(\n np.squeeze(sys322.horner(1.j)),\n mag[:, :, 0] * np.exp(1.j * phase[:, :, 0]))\n\nclass TestRss:\n \"\"\"These are tests for the proper functionality of statesp.rss.\"\"\"\n\n # Maxmimum number of states to test + 1\n maxStates = 10\n # Maximum number of inputs and outputs to test + 1\n maxIO = 5\n\n @pytest.mark.parametrize('states', range(1, maxStates))\n @pytest.mark.parametrize('outputs', range(1, maxIO))\n @pytest.mark.parametrize('inputs', range(1, maxIO))\n def test_shape(self, states, outputs, inputs):\n \"\"\"Test that rss outputs have the right state, input, and output size.\"\"\"\n sys = rss(states, outputs, inputs)\n assert sys.nstates == states\n assert sys.ninputs == inputs\n assert sys.noutputs == outputs\n\n @pytest.mark.parametrize('states', range(1, maxStates))\n @pytest.mark.parametrize('outputs', range(1, maxIO))\n @pytest.mark.parametrize('inputs', range(1, maxIO))\n def test_pole(self, states, outputs, inputs):\n \"\"\"Test that the poles of rss outputs have a negative real part.\"\"\"\n sys = rss(states, outputs, inputs)\n p = sys.pole()\n for z in p:\n assert z.real < 0\n\n\nclass TestDrss:\n \"\"\"These are tests for the proper functionality of statesp.drss.\"\"\"\n\n # Maximum number of states to test + 1\n maxStates = 10\n # Maximum number of inputs and outputs to test + 1\n maxIO = 5\n\n @pytest.mark.parametrize('states', range(1, maxStates))\n @pytest.mark.parametrize('outputs', range(1, maxIO))\n @pytest.mark.parametrize('inputs', range(1, maxIO))\n def test_shape(self, states, outputs, inputs):\n \"\"\"Test that drss outputs have the right state, input, and output size.\"\"\"\n sys = drss(states, outputs, inputs)\n assert sys.nstates == states\n assert sys.ninputs == inputs\n assert sys.noutputs == outputs\n\n @pytest.mark.parametrize('states', range(1, maxStates))\n @pytest.mark.parametrize('outputs', range(1, maxIO))\n @pytest.mark.parametrize('inputs', range(1, maxIO))\n def test_pole(self, states, outputs, inputs):\n \"\"\"Test that the poles of drss outputs have less than unit magnitude.\"\"\"\n sys = drss(states, outputs, inputs)\n p = sys.pole()\n for z in p:\n assert abs(z) < 1\n\n\nclass TestLTIConverter:\n \"\"\"Test returnScipySignalLTI method\"\"\"\n\n @pytest.fixture\n def mimoss(self, request):\n \"\"\"Test system with various dt values\"\"\"\n n = 5\n m = 3\n p = 2\n bx, bu = np.mgrid[1:n + 1, 1:m + 1]\n cy, cx = np.mgrid[1:p + 1, 1:n + 1]\n dy, du = np.mgrid[1:p + 1, 1:m + 1]\n return StateSpace(np.eye(5) + np.eye(5, 5, 1),\n bx * bu,\n cy * cx,\n dy * du,\n request.param)\n\n @pytest.mark.parametrize(\"mimoss\",\n [None,\n 0,\n 0.1,\n 1,\n True],\n indirect=True)\n def test_returnScipySignalLTI(self, mimoss):\n \"\"\"Test returnScipySignalLTI method with strict=False\"\"\"\n sslti = mimoss.returnScipySignalLTI(strict=False)\n for i in range(mimoss.noutputs):\n for j in range(mimoss.ninputs):\n np.testing.assert_allclose(sslti[i][j].A, mimoss.A)\n np.testing.assert_allclose(sslti[i][j].B, mimoss.B[:,\n j:j + 1])\n np.testing.assert_allclose(sslti[i][j].C, mimoss.C[i:i + 1,\n :])\n np.testing.assert_allclose(sslti[i][j].D, mimoss.D[i:i + 1,\n j:j + 1])\n if mimoss.dt == 0:\n assert sslti[i][j].dt is None\n else:\n assert sslti[i][j].dt == mimoss.dt\n\n @pytest.mark.parametrize(\"mimoss\", [None], indirect=True)\n def test_returnScipySignalLTI_error(self, mimoss):\n \"\"\"Test 
returnScipySignalLTI method with dt=None and strict=True\"\"\"\n with pytest.raises(ValueError):\n mimoss.returnScipySignalLTI()\n with pytest.raises(ValueError):\n mimoss.returnScipySignalLTI(strict=True)\n\n\nclass TestStateSpaceConfig:\n \"\"\"Test the configuration of the StateSpace module\"\"\"\n\n @pytest.fixture\n def matarrayout(self):\n \"\"\"Override autoused global fixture within this class\"\"\"\n pass\n\n def test_statespace_defaults(self, matarrayout):\n \"\"\"Make sure the tests are run with the configured defaults\"\"\"\n for k, v in _statesp_defaults.items():\n assert defaults[k] == v, \\\n \"{} is {} but expected {}\".format(k, defaults[k], v)\n\n\n# test data for test_latex_repr below\nLTX_G1 = ([[np.pi, 1e100], [-1.23456789, 5e-23]],\n [[0], [1]],\n [[987654321, 0.001234]],\n [[5]])\n\nLTX_G2 = ([],\n [],\n [],\n [[1.2345, -2e-200], [-1, 0]])\n\nLTX_G1_REF = {\n 'p3_p' : '\\\\[\\n\\\\left(\\n\\\\begin{array}{rllrll|rll}\\n3.&\\\\hspace{-1em}14&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&1\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\cdot10^{100}&0\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n-1.&\\\\hspace{-1em}23&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&5\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\cdot10^{-23}&1\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\hline\\n9.&\\\\hspace{-1em}88&\\\\hspace{-1em}\\\\cdot10^{8}&0.&\\\\hspace{-1em}00123&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&5\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\end{array}\\\\right)\\n\\\\]',\n\n 'p5_p' : '\\\\[\\n\\\\left(\\n\\\\begin{array}{rllrll|rll}\\n3.&\\\\hspace{-1em}1416&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&1\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\cdot10^{100}&0\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n-1.&\\\\hspace{-1em}2346&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&5\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\cdot10^{-23}&1\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\hline\\n9.&\\\\hspace{-1em}8765&\\\\hspace{-1em}\\\\cdot10^{8}&0.&\\\\hspace{-1em}001234&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&5\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\end{array}\\\\right)\\n\\\\]',\n\n 'p3_s' : '\\\\[\\n\\\\begin{array}{ll}\\nA = \\\\left(\\\\begin{array}{rllrll}\\n3.&\\\\hspace{-1em}14&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&1\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\cdot10^{100}\\\\\\\\\\n-1.&\\\\hspace{-1em}23&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&5\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\cdot10^{-23}\\\\\\\\\\n\\\\end{array}\\\\right)\\n&\\nB = \\\\left(\\\\begin{array}{rll}\\n0\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n1\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\end{array}\\\\right)\\n\\\\\\\\\\nC = \\\\left(\\\\begin{array}{rllrll}\\n9.&\\\\hspace{-1em}88&\\\\hspace{-1em}\\\\cdot10^{8}&0.&\\\\hspace{-1em}00123&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\end{array}\\\\right)\\n&\\nD = \\\\left(\\\\begin{array}{rll}\\n5\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\end{array}\\\\right)\\n\\\\end{array}\\n\\\\]',\n\n 'p5_s' : '\\\\[\\n\\\\begin{array}{ll}\\nA = 
\\\\left(\\\\begin{array}{rllrll}\\n3.&\\\\hspace{-1em}1416&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&1\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\cdot10^{100}\\\\\\\\\\n-1.&\\\\hspace{-1em}2346&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&5\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\cdot10^{-23}\\\\\\\\\\n\\\\end{array}\\\\right)\\n&\\nB = \\\\left(\\\\begin{array}{rll}\\n0\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n1\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\end{array}\\\\right)\\n\\\\\\\\\\nC = \\\\left(\\\\begin{array}{rllrll}\\n9.&\\\\hspace{-1em}8765&\\\\hspace{-1em}\\\\cdot10^{8}&0.&\\\\hspace{-1em}001234&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\end{array}\\\\right)\\n&\\nD = \\\\left(\\\\begin{array}{rll}\\n5\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\end{array}\\\\right)\\n\\\\end{array}\\n\\\\]',\n}\n\nLTX_G2_REF = {\n 'p3_p' : '\\\\[\\n\\\\left(\\n\\\\begin{array}{rllrll}\\n1.&\\\\hspace{-1em}23&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&-2\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\cdot10^{-200}\\\\\\\\\\n-1\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&0\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\end{array}\\\\right)\\n\\\\]',\n\n 'p5_p' : '\\\\[\\n\\\\left(\\n\\\\begin{array}{rllrll}\\n1.&\\\\hspace{-1em}2345&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&-2\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\cdot10^{-200}\\\\\\\\\\n-1\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&0\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\end{array}\\\\right)\\n\\\\]',\n\n 'p3_s' : '\\\\[\\n\\\\begin{array}{ll}\\nD = \\\\left(\\\\begin{array}{rllrll}\\n1.&\\\\hspace{-1em}23&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&-2\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\cdot10^{-200}\\\\\\\\\\n-1\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&0\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\end{array}\\\\right)\\n\\\\end{array}\\n\\\\]',\n\n 'p5_s' : '\\\\[\\n\\\\begin{array}{ll}\\nD = \\\\left(\\\\begin{array}{rllrll}\\n1.&\\\\hspace{-1em}2345&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&-2\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\cdot10^{-200}\\\\\\\\\\n-1\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}&0\\\\phantom{.}&\\\\hspace{-1em}&\\\\hspace{-1em}\\\\phantom{\\\\cdot}\\\\\\\\\\n\\\\end{array}\\\\right)\\n\\\\end{array}\\n\\\\]',\n}\n\nrefkey_n = {None: 'p3', '.3g': 'p3', '.5g': 'p5'}\nrefkey_r = {None: 'p', 'partitioned': 'p', 'separate': 's'}\n\[email protected](\" gmats, ref\",\n [(LTX_G1, LTX_G1_REF),\n (LTX_G2, LTX_G2_REF)])\[email protected](\"repr_type\", [None, \"partitioned\", \"separate\"])\[email protected](\"num_format\", [None, \".3g\", \".5g\"])\ndef test_latex_repr(gmats, ref, repr_type, num_format, editsdefaults):\n \"\"\"Test `._latex_repr_` with different config values\n\n This is a 'gold image' test, so if you change behaviour,\n you'll need to regenerate the reference results.\n Try something like:\n control.reset_defaults()\n print(f'p3_p : {g1._repr_latex_()!r}')\n \"\"\"\n from control import set_defaults\n if num_format is not None:\n set_defaults('statesp', latex_num_format=num_format)\n\n if repr_type is not None:\n set_defaults('statesp', latex_repr_type=repr_type)\n\n g = StateSpace(*gmats)\n refkey = 
\"{}_{}\".format(refkey_n[num_format], refkey_r[repr_type])\n assert g._repr_latex_() == ref[refkey]\n\n\[email protected](\n \"op\",\n [pytest.param(getattr(operator, s), id=s) for s in ('add', 'sub', 'mul')])\[email protected](\n \"tf, arr\",\n [pytest.param(ct.tf([1], [0.5, 1]), np.array(2.), id=\"0D scalar\"),\n pytest.param(ct.tf([1], [0.5, 1]), np.array([2.]), id=\"1D scalar\"),\n pytest.param(ct.tf([1], [0.5, 1]), np.array([[2.]]), id=\"2D scalar\")])\ndef test_xferfcn_ndarray_precedence(op, tf, arr):\n # Apply the operator to the transfer function and array\n ss = ct.tf2ss(tf)\n result = op(ss, arr)\n assert isinstance(result, ct.StateSpace)\n\n # Apply the operator to the array and transfer function\n ss = ct.tf2ss(tf)\n result = op(arr, ss)\n assert isinstance(result, ct.StateSpace)\n\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.matrix",
"numpy.exp",
"numpy.testing.assert_equal",
"numpy.eye",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal",
"numpy.full_like",
"scipy.linalg.eigvals",
"numpy.testing.assert_allclose",
"numpy.array",
"scipy.linalg.block_diag",
"numpy.tile",
"numpy.sort",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
}
] |
Skeftical/SuRF-Reproducibility | [
"589096b2caa4f173a00f617dd083a042e7b27f5b"
] | [
"codebase/query_generation.py"
] | [
"from pathlib import Path\nimport numpy as np\nimport os\nimport sys\nimport itertools\nfrom pathlib import Path\nimport logging\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG,)\nlogger = logging.getLogger(\"__main__\")\ndef generate_boolean_vector(f,q,r,DIMS):\n \"\"\"\n Generate boolean vector to filter dataset\n \n Parameters:\n -----------\n f : ndarray \n data to which the query is executed\n q : ndarray\n multi-dimensional point represent x\n r : ndarray\n multi-dimensional vector representing l\n DIMS: int\n number of dimensions\n \"\"\"\n b = None\n for i in range(DIMS):\n if b is None:\n b = (f[:,i]<q[i]+r[i]) & (f[:,i]>q[i])\n else :\n b = b & (f[:,i]<q[i]+r[i]) & (f[:,i]>q[i])\n return b\n\n#different execution depending on aggr or density\n#Execute Query Function\ndef execute_query_dens(b,data_space):\n res = data_space[b]\n return res.shape[0]\ndef execute_query_aggr(b,data_space):\n res = data_space[b]\n return float(np.mean(res[:,-1])) if res.shape[0]!=0 else 0\n\ndef generate_queries(DIMS,t,data):\n \"\"\"\n Generates queries of arbitrary dimension over given data\n \n Parmeters:\n ----------\n DIMS : int\n dimensionality of given dataset\n t : str\n 'density' or 'aggr' to differentiate between COUNT aggregate and the rest of the aggregates\n data : ndarray\n The multi-dimensional dataset of row vectors\n \"\"\"\n #Start With clusters\n x = np.linspace(0,1,6)\n a = [x.tolist()]*DIMS\n #Define cluster centers and covariance matrix\n cluster_centers = list(itertools.product(*a))\n cov = np.identity(DIMS)*0.2\n logger.debug(\"Generating queries at %d cluster centers\" % len(cluster_centers))\n query_centers = []\n #Generate queries over cluster centers\n for c in cluster_centers:\n queries = np.random.multivariate_normal(np.array(c), cov, size=50)\n query_centers.append(queries)\n query_centers = np.array(query_centers).reshape(-1,DIMS)\n \n ranges = np.random.uniform(low=0.03**(1/DIMS), high=0.15**(1/DIMS), size=(query_centers.shape[0], 1))\n ranges = np.ones((query_centers.shape[0], DIMS))*ranges\n assert(ranges.shape[0]==query_centers.shape[0])\n queries = []\n i=0\n logger.debug(\"Query Generation\")\n for q,r in zip(query_centers,ranges):\n b = generate_boolean_vector(data,q,r,DIMS)\n if t=='density':\n qt = q.tolist()\n qt += r.tolist()\n qt.append(execute_query_dens(b,data))\n queries.append(qt)\n elif t=='aggr':\n qt = q.tolist()\n qt += r.tolist()\n qt.append(execute_query_aggr(b,data))\n queries.append(qt)\n i+=1\n logger.debug(\"Generated {0} queries\".format(len(queries)))\n return queries\n\nif __name__=='__main__':\n #Generate Queries\n directory = os.fsencode('../input')\n for file in os.listdir(directory):\n qs = []\n filename = os.fsdecode(file)\n if not filename.endswith(\".csv\") and filename.startswith(\"data\"):\n a =filename.split('_')\n t = a[1]\n dim = int(a[2].split('=')[1])\n multi = a[-1]\n #Check if query file has been generated and skip\n qf = '../input/queries/queries-uniform-{0}-multi_{1}-{2}'.format(dim,multi,t)\n if Path(qf).exists():\n print(\"Query file '{0}' already exists skipping \".format(qf))\n continue;\n logger.debug('Loading file')\n f = np.loadtxt('../input/%s' % (filename) ,delimiter=',')\n logger.debug(\"Loaded file with filename %s\" % (filename))\n if t=='aggr':\n f = f.reshape(-1, dim+1)\n else:\n f = f.reshape(-1, dim)\n qs = generate_queries(dim,t,f)\n logger.debug(\"Current shape {0}\".format(len(qs)))\n qs = np.array(qs).reshape(-1, 2*dim+1)\n logger.debug(\"New shape {0}\".format(qs.shape))\n 
np.savetxt('../input/queries/queries-uniform-{0}-multi_{1}-{2}'.format(dim,multi,t),qs, delimiter=',')\n "
] | [
[
"numpy.linspace",
"numpy.ones",
"numpy.identity",
"numpy.mean",
"numpy.random.uniform",
"numpy.array",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Milozms/OpenNRE | [
"93dc39823645bfebcfb562dd734528dc5c3ce12f"
] | [
"dev.py"
] | [
"import tensorflow as tf\nimport numpy as np\nimport time\nimport datetime\nimport os\nimport network\nimport json\nfrom sklearn.metrics import average_precision_score\nimport sys\n\ntf.app.flags.DEFINE_string('export_path','./data/KBP/','path to data')\n\n# config_file = open(os.path.join('data', \"config\"), 'r')\n# config = json.loads(config_file.read())\n# config_file.close()\nfixlen = 300\ntf.app.flags.DEFINE_integer('max_length', fixlen, 'maximum of number of words in one sentence')\ntf.app.flags.DEFINE_integer('pos_num', fixlen * 2 + 1, 'number of position embedding vectors')\ntf.app.flags.DEFINE_integer('num_classes', 7,'maximum of relations')\n\ntf.app.flags.DEFINE_integer('hidden_size',230,'hidden feature size')\ntf.app.flags.DEFINE_integer('pos_size',5,'position embedding size')\ntf.app.flags.DEFINE_integer('word_size', 300, 'word embedding size')\n\ntf.app.flags.DEFINE_integer('batch_size',64,'entity numbers used each training time')\ntf.app.flags.DEFINE_float('learning_rate',0.5,'entity numbers used each training time')\n\ntf.app.flags.DEFINE_string('checkpoint_dir', './checkpoint/', 'path to store checkpoint')\ntf.app.flags.DEFINE_string('test_result_dir', './test_result', 'path to store the test results')\n\ntf.app.flags.DEFINE_string('model_name', 'birnn_att', 'model\\'s name')\ntf.app.flags.DEFINE_string('epoch_range', '(0, 100)', 'checkpoint epoch range')\n\ntf.app.flags.DEFINE_float('drop_prob', 0.5, 'dropout rate')\ntf.app.flags.DEFINE_integer('random_seed', 7698, 'random seed used')\n\nFLAGS = tf.app.flags.FLAGS\n\nfrom framework import Framework\ndef main(_):\n from model.pcnn_att import pcnn_att\n from model.cnn_att import cnn_att\n from model.pcnn_att_adv import pcnn_att_adv\n from model.pcnn_att_soft_label import pcnn_att_soft_label\n from model.pcnn_ave import pcnn_ave\n from model.pcnn_max import pcnn_max\n from model.pcnn import pcnn\n from model.cnn_ave import cnn_ave\n from model.cnn_max import cnn_max\n from model.cnn import cnn\n from model.rnn_att import rnn_att\n from model.rnn_max import rnn_max\n from model.rnn_ave import rnn_ave\n from model.rnn import rnn\n from model.birnn import birnn\n from model.birnn_max import birnn_max\n from model.birnn_ave import birnn_ave\n from model.birnn_att import birnn_att\n\n from model.pcnn_att_tanh import pcnn_att_tanh\n\n from model.pcnn_ave_adv import pcnn_ave_adv\n from model.pcnn_max_adv import pcnn_max_adv\n from model.cnn_ave_adv import cnn_ave_adv\n from model.cnn_max_adv import cnn_max_adv\n from model.cnn_att_adv import cnn_att_adv\n from model.rnn_att_adv import rnn_att_adv\n from model.rnn_max_adv import rnn_max_adv\n from model.rnn_ave_adv import rnn_ave_adv\n from model.birnn_max_adv import birnn_max_adv\n from model.birnn_ave_adv import birnn_ave_adv\n from model.birnn_att_adv import birnn_att_adv\n from model.pcnn_att_adam import pcnn_att_adam\n\n if not FLAGS.model_name in locals():\n exit()\n model = locals()[FLAGS.model_name]\n model(is_training=False, is_dev=True)\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
] | [
[
"tensorflow.app.flags.DEFINE_string",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.app.run"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
EmmmmmBoom/KOPRA | [
"2e1ec412b11c709adc01898dfade9315b494e719"
] | [
"code/model.py"
] | [
"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport dgl\r\nimport math\r\nfrom torch.utils.data import DataLoader\r\n\r\n\r\nclass GATLayer(nn.Module):\r\n def __init__(self, in_dim, out_dim, device):\r\n super(GATLayer, self).__init__()\r\n self.in_dim = in_dim\r\n self.out_dim = out_dim\r\n\r\n self.fc = nn.Linear(in_dim, out_dim, bias=False)\r\n self.attn_fc = nn.Linear(2 * out_dim, 1, bias=False)\r\n self.to(device)\r\n \r\n def edge_attention(self, edges):\r\n z2 = torch.cat([edges.src['z'], edges.dst['z']], dim=1)\r\n try:\r\n a = self.attn_fc(z2)\r\n except:\r\n print(z2, edges.src['z'], edges.dst['z'])\r\n os.system('pause')\r\n e = F.leaky_relu(a)\r\n edges.data['e'] = e\r\n return {'e': e}\r\n \r\n def message_func(self, edges):\r\n return {'z': edges.src['z'], 'e': edges.data['e']}\r\n\r\n def reduce_func(self, nodes):\r\n alpha = F.softmax(nodes.mailbox['e'], dim=1)\r\n h = torch.sum(alpha * nodes.mailbox['z'], dim=1)\r\n return {'h': h}\r\n\r\n def update_edge_e(self, h, g):\r\n self.eval()\r\n z = self.fc(h)\r\n g.ndata['z'] = z\r\n g.apply_edges(self.edge_attention)\r\n g.ndata.pop('z')\r\n return g.edata['e']\r\n\r\n def forward(self, h, g, counter):\r\n \"\"\"Return 以最后一个node为root的gcn输出\r\n :params h: 所有节点emb\r\n \"\"\"\r\n z = self.fc(h)\r\n g.ndata['z'] = z\r\n g.ndata['h'] = z\r\n g.apply_edges(self.edge_attention, edges='__ALL__')\r\n it = 0\r\n while counter > it:\r\n g.update_all(self.message_func, self.reduce_func)\r\n g.ndata['z'] = g.ndata['h']\r\n it += 1\r\n g.ndata.pop('z')\r\n return g.ndata['h']\r\n\t\t\r\n\t\t\r\nclass MyNet(nn.Module):\r\n def __init__(self, num_entities, num_relation, dim, device, hidden_unit=256, \r\n margin=1.0):\r\n super(MyNet, self).__init__()\r\n\r\n self.num_entity = num_entities\r\n self.num_relation = num_relation\r\n self.dim = dim\r\n \r\n self.entity_emb = nn.Embedding(num_entities, dim)\r\n self.gat = GATLayer(dim, dim, device)\r\n # self.bi_atten = BilinearAttention(dim)\r\n self.dropout = nn.Dropout(0)\r\n self.device = device\r\n\r\n self.relation_emb = nn.Embedding(self.num_relation, self.dim * self.dim)\r\n self.criterion = nn.MarginRankingLoss(margin=margin, reduction='none')\r\n self.to(device)\r\n\r\n def get_preference(self, graph_l, graph_s, l_counter, s_counter, l_length, s_length):\r\n ndata_emb_l = self.entity_emb(graph_l.ndata['idx']).squeeze(1)\r\n l_interest_v = self.gat(ndata_emb_l, graph_l, max(l_counter)-1).reshape(-1, self.dim)\r\n l_unbatch_g = [gg0.ndata['h'] for gg0 in dgl.unbatch(graph_l)]\r\n l_interest_v = torch.cat([gg0[l_length[index]-1] for index, gg0 in enumerate(l_unbatch_g)], dim=0).view(-1, 1, self.dim)\r\n\r\n ndata_emb_s = self.entity_emb(graph_s.ndata['idx']).squeeze(1)\r\n s_interest_v = self.gat(ndata_emb_s, graph_s, max(s_counter)-1).reshape(-1, self.dim)\r\n s_unbatch_g = [gg0.ndata['h'] for gg0 in dgl.unbatch(graph_s)]\r\n s_interest_v = torch.cat([gg0[s_length[index]-1] for index, gg0 in enumerate(s_unbatch_g)], dim=0).view(-1, 1, self.dim)\r\n\r\n query = torch.cat((l_interest_v, s_interest_v), dim=1)\r\n return query\r\n\r\n def forward_kg(self, h, r, t, hn, rn, tn):\r\n kge_loss = 0\r\n\r\n h_emb = self.entity_emb(h)\r\n t_emb = self.entity_emb(t).transpose(1, 2)\r\n r_emb = self.relation_emb(r).view(-1, self.dim, self.dim)\r\n hn_emb = self.entity_emb(hn)\r\n tn_emb = self.entity_emb(tn).transpose(1, 2)\r\n rn_emb = self.relation_emb(rn).view(-1, self.dim, self.dim)\r\n distance1 = torch.squeeze(\r\n 
torch.matmul(torch.matmul(h_emb, r_emb), t_emb)\r\n )\r\n distance2 = torch.squeeze(\r\n torch.matmul(torch.matmul(hn_emb, rn_emb), tn_emb) \r\n )\r\n hRt = torch.squeeze(\r\n torch.matmul(torch.matmul(h_emb, r_emb), t_emb)\r\n )\r\n target = torch.tensor([-1], dtype=torch.long, device=self.device)\r\n \r\n return self.criterion(distance1, distance2, target).mean()\r\n\r\n def forward(self, userId, news_entities_id, graph_l, graph_s, l_counter, s_counter, update_emb, l_length, s_length):\r\n \"\"\"Returns the value of the outputs, the predictions of this model\r\n \r\n :params userId: user id \r\n :params news_entities_id: candidate news entities ids\r\n :params graph_l(dgl object): long-term interest graph\r\n :params graph_s(dgl object): short-term interest graph\r\n \"\"\"\r\n ndata_emb_l = self.entity_emb(graph_l.ndata['idx']).squeeze(1)\r\n l_interest_v = self.gat(ndata_emb_l, graph_l, max(l_counter)-1).reshape(-1, self.dim)\r\n\r\n ndata_emb_list = []\r\n\r\n l_unbatch_g = [gg0.ndata['h'] for gg0 in dgl.unbatch(graph_l)]\r\n l_interest_v = torch.cat([gg0[l_length[index]-1] for index, gg0 in enumerate(l_unbatch_g)], dim=0).view(-1, 1, self.dim)\r\n\r\n ndata_emb_s = self.entity_emb(graph_s.ndata['idx']).squeeze(1)\r\n s_interest_v = self.gat(ndata_emb_s, graph_s, max(s_counter)-1).reshape(-1, self.dim)\r\n\r\n ndata_emb_s_list = []\r\n\r\n s_unbatch_g = [gg0.ndata['h'] for gg0 in dgl.unbatch(graph_s)]\r\n s_interest_v = torch.cat([gg0[s_length[index]-1] for index, gg0 in enumerate(s_unbatch_g)], dim=0).view(-1, 1, self.dim)\r\n\r\n if update_emb:\r\n ndata_emb_list = l_unbatch_g\r\n ndata_emb_s_list = s_unbatch_g\r\n\r\n mask = torch.where(news_entities_id==201659, torch.full_like(news_entities_id, 0), news_entities_id).unsqueeze(1).to(torch.float)\r\n news_v = self.entity_emb(news_entities_id).transpose(1, 2)\r\n \r\n\r\n query = torch.cat((l_interest_v, s_interest_v), dim=1)\r\n scores = torch.bmm(query, news_v) / math.sqrt(query.shape[-1])\r\n atten_weights = self.dropout(F.softmax(scores, dim=1))\r\n tmp = torch.sum(scores.mul(atten_weights), 1, keepdim=True)\r\n outs = torch.sum(torch.where(mask==0, mask, tmp), 2, keepdim=True)\r\n #outs = torch.bmm(scores.transpose(1, 2), atten_weights)\r\n return outs, ndata_emb_list, ndata_emb_s_list#, edata_emb\r\n \r\n"
] | [
[
"torch.nn.MarginRankingLoss",
"torch.nn.functional.softmax",
"torch.nn.Dropout",
"torch.cat",
"torch.sum",
"torch.nn.Embedding",
"torch.tensor",
"torch.nn.Linear",
"torch.matmul",
"torch.bmm",
"torch.nn.functional.leaky_relu",
"torch.where",
"torch.full_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zamanianlab/polyAudit | [
"040088703e40b207faa4abb7197d0f38e28bf924"
] | [
"polyAudit.py"
] | [
"import sys\nimport argparse\nimport pathlib\nfrom Bio import SeqIO\nfrom Bio import AlignIO\nfrom Bio.Align import AlignInfo\nimport csv\nimport pandas as pd\n\n\ndef primertrim(records, primers, output):\n # remove PacBio primers\n trimmed_records = []\n with open(primers) as primer_file:\n primer_list = []\n for line in primer_file:\n primer_list.append(line.strip())\n\n with open(records) as fasta:\n records_iter = SeqIO.parse(fasta, 'fasta')\n for record in records_iter:\n for primer in primer_list:\n len_primer = len(primer)\n if record.seq.endswith(primer):\n # slice off primer\n record.seq = record.seq[:-len_primer]\n trimmed_records.append(record)\n break\n else:\n pass\n output_path = pathlib.Path(output)\n input_path = pathlib.Path(records)\n stem = input_path.stem\n with open(pathlib.Path.joinpath(output_path, stem + \"_trimmed.fasta\"), 'w') as trimmed:\n for record in trimmed_records:\n SeqIO.write(record, trimmed, 'fasta')\n return trimmed_records\n\n\ndef measurepolya(trimmed_records, output_directory, input_file):\n '''\n Measure the polyA tails of trimmed sequences.\n '''\n ids = []\n tails = []\n if type(trimmed_records) is list:\n print(\"Using trimmed sequences to measure poly(A) tails.\")\n for record in trimmed_records:\n index = 1\n t_counter = 1\n mismatch_counter = 0\n rc = record.seq.reverse_complement()\n for nt in rc:\n if nt == \"T\":\n t_counter += 1\n index += 1\n mismatch_counter = 0\n else:\n index += 1\n mismatch_counter += 1\n if mismatch_counter == 2:\n ids.append(record.id)\n tails.append(t_counter)\n break\n else:\n pass\n elif type(trimmed_records) is str:\n print(\"Using provided sequences to measure poly(A) tails.\")\n with open(trimmed_records) as fasta:\n records_iter = SeqIO.parse(fasta, 'fasta')\n for record in records_iter:\n index = 1\n t_counter = 1\n mismatch_counter = 0\n rc = record.seq.reverse_complement()\n for nt in rc:\n if nt == \"T\":\n t_counter += 1\n index += 1\n mismatch_counter = 0\n else:\n index += 1\n mismatch_counter += 1\n if mismatch_counter == 2:\n ids.append(record.id)\n tails.append(t_counter)\n break\n else:\n pass\n dict = {ids[i]: tails[i] for i in range(len(ids))}\n output_path = pathlib.Path(output_directory)\n input_path = pathlib.Path(input_file)\n stem = input_path.stem\n with open(pathlib.Path.joinpath(output_path, stem + \"_polya.csv\"), 'w') as polyas:\n for key in dict.keys():\n polyas.write(\"%s,%s\\n\" % (key, dict[key]))\n return dict\n\n\ndef findkmer(trimmed_records, length_file, output_directory, k, n, input_file):\n '''\n Removes poly(A) tails and then searches the upstream sequence for\n overrepresented kmers. 
kmer length provided by -k, which defaults to 6.\n Returns a df with the top 10 (non AAAAAA or TTTTTT) k-mers.\n '''\n counts = {}\n if type(length_file) is dict:\n lengths = length_file\n if type(length_file) is str:\n with open(length_file) as input:\n reader = csv.reader(input)\n lengths = {rows[0]: rows[1] for rows in reader}\n if type(trimmed_records) is list:\n for record in trimmed_records:\n try:\n length = int(lengths.get(record.id))\n except TypeError:\n print(\"Length cannot be coerced to type int.\")\n continue\n polya = int(lengths.get(record.id))\n start = len(record.seq) - (50 + polya)\n sliced_seq = str(record.seq[start:-polya])\n num_kmers = len(sliced_seq) - k + 1\n for i in range(num_kmers):\n # Slice the string to get the kmer\n kmer = sliced_seq[i:i + k]\n # Add the kmer to the dictionary if it's not there\n if kmer not in counts:\n counts[kmer] = 0\n # Increment the count for this kmer\n counts[kmer] += 1\n else:\n counts[kmer] += 1\n elif type(trimmed_records) is str:\n with open(trimmed_records) as fasta:\n records_iter = SeqIO.parse(fasta, 'fasta')\n for record in records_iter:\n try:\n length = int(lengths.get(record.id))\n except TypeError:\n print(\"Length cannot be coerced to type int.\")\n continue\n polya = int(lengths.get(record.id))\n start = len(record.seq) - (50 + polya)\n sliced_seq = str(record.seq[start:-polya])\n num_kmers = len(sliced_seq) - k + 1\n for i in range(num_kmers):\n # Slice the string to get the kmer\n kmer = sliced_seq[i:i + k]\n # Add the kmer to the dictionary if it's not there\n if kmer not in counts:\n counts[kmer] = 0\n # Increment the count for this kmer\n counts[kmer] += 1\n else:\n counts[kmer] += 1\n\n out = pd.DataFrame.from_dict(counts, orient='index',\n columns=['Count'])\n out.index.name = 'kmer'\n out.reset_index(inplace=True)\n out.sort_values(by=['Count'], inplace=True, ascending=False)\n output_path = pathlib.Path(output_directory)\n input_path = pathlib.Path(input_file)\n stem = input_path.stem\n with open(pathlib.Path.joinpath(output_path, stem + \"_\" + str(k) + \"mer.csv\"), 'w') as kmers:\n out.to_csv(kmers, index=False)\n out = out[out.kmer != 'AAAAAA']\n out = out[out.kmer != 'TTTTTT']\n out = out[:n]\n return out\n\n\ndef findpas(trimmed_records, length_file, kmer_file, output_directory, input_file):\n '''\n Removes poly(A) tails and searches the upstream sequences for kmers\n provided by the user or piped from kmercount(). 
Starts with the top kmer,\n so will only find non-canonical PAS motifs if there hasn't been a canonical\n (AAUAAA) already found.\n '''\n records = []\n seqs = []\n indices = []\n found = []\n\n kmers = []\n if isinstance(kmer_file, pd.DataFrame):\n kmers = kmer_file['kmer'].tolist()\n elif type(kmer_file) is str:\n with open(kmer_file) as input:\n for line in input:\n kmers.append(line.strip())\n # check which type of lengths were provided\n if type(length_file) is dict:\n lengths = length_file\n if type(length_file) is str:\n with open(length_file) as input:\n reader = csv.reader(input)\n lengths = {rows[0]: rows[1] for rows in reader}\n if type(trimmed_records) is list:\n for record in trimmed_records:\n polya = int(lengths.get(record.id))\n start = len(record.seq) - (50 + polya)\n sliced_seq = str(record.seq[start:-polya])\n for kmer in kmers:\n if kmer in sliced_seq:\n index = sliced_seq.find(kmer)\n index = index - 5\n records.append(record.id)\n out_seq = str(record.seq[start:-(polya - 15)])\n seqs.append(out_seq)\n indices.append(index)\n found.append(kmer)\n break\n elif kmers.index(kmer) == len(kmers) - 1:\n records.append(record.id)\n out_seq = str(record.seq[start:-(polya - 15)])\n seqs.append(out_seq)\n indices.append(\"NA\")\n found.append(\"NA\")\n else:\n continue\n elif type(trimmed_records) is str:\n with open(trimmed_records) as fasta:\n records_iter = SeqIO.parse(fasta, 'fasta')\n for record in records_iter:\n polya = int(lengths.get(record.id))\n start = len(record.seq) - (50 + polya)\n sliced_seq = str(record.seq[start:-polya])\n for kmer in kmers:\n if kmer in sliced_seq:\n index = sliced_seq.find(kmer)\n index = index - 5\n records.append(record.id)\n out_seq = str(record.seq[start:-(polya - 15)])\n seqs.append(out_seq)\n indices.append(index)\n found.append(kmer)\n break\n elif kmers.index(kmer) == len(kmers) - 1:\n records.append(record.id)\n out_seq = str(record.seq[start:-(polya - 15)])\n seqs.append(out_seq)\n indices.append(\"NA\")\n found.append(\"NA\")\n else:\n continue\n\n\n df = pd.DataFrame(list(zip(records, seqs, found, indices)), columns=[\n \"Isoform\", \"3'_Sequence\", \"kmer\", \"Index\"])\n output_path = pathlib.Path(output_directory)\n input_path = pathlib.Path(input_file)\n stem = input_path.stem\n with open(pathlib.Path.joinpath(output_path, stem + \"_PAS.csv\"), 'w') as pas:\n df.to_csv(pas)\n return df\n\n\ndef pssm(pas, output_directory, input_file):\n '''\n Retains a portion of the poly(A) tails, aligns by putative PAS, and\n generates a position-specific score matrix (PSSM) to calculate nucleotide\n frequencies at every position.\n '''\n sequences = {}\n\n if isinstance(pas, pd.DataFrame):\n data = pas\n elif type(pas) is str:\n data = pd.read_csv(pas)\n # create a dictionary from the read in CSV\n temp_seqs = {data[\"Isoform\"].tolist()[i]: data[\"3'_Sequence\"].tolist()[i]\n for i in range(len(data[\"Isoform\"].tolist()))}\n # create a dictionary where every sequence is the same length\n for id in temp_seqs:\n if type(temp_seqs[id]) is str and len(temp_seqs[id]) == 65:\n sequences[id] = temp_seqs[id]\n output_path = pathlib.Path(output_directory)\n if isinstance(pas, pd.DataFrame):\n input_path = pathlib.Path(input_file)\n elif type(pas) is str:\n input_path = pathlib.Path(pas)\n stem = input_path.stem\n # # write to FASTA the cleaned sequences\n with open(pathlib.Path.joinpath(output_path, stem + \"_PAS_clean.fasta\"), 'w') as output_file:\n for sequence in sequences:\n output_file.write(\">\" + sequence + \"\\n\" +\n sequences[sequence] 
+ \"\\n\")\n # read the FASTA back in\n alignment = AlignIO.read(pathlib.Path.joinpath(\n output_path, stem + \"_pas_clean.fasta\"), \"fasta\")\n summary_align = AlignInfo.SummaryInfo(alignment)\n consensus = summary_align.dumb_consensus()\n pssm = summary_align.pos_specific_score_matrix(\n consensus, chars_to_ignore=[\"N\"])\n sys.stdout = open(pathlib.Path.joinpath(\n output_path, stem + \"_pssm.txt\"), 'w')\n print(pssm)\n\n\ndef getargs():\n\n # arguments\n parser = argparse.ArgumentParser(\n description='Measure poly(A) tails, count k-mers, find the putative \\\n PAS, and generate PSSMs a given sequence file.')\n\n # required arguments\n parser.add_argument('input_file',\n help='A FASTA file with long-read sequencing data \\\n that hasn''t had poly(A) tails trimmed.')\n\n parser.add_argument('output_directory',\n help='A path to the output directory.')\n\n # optional arguments\n parser.add_argument('-p', '--primer_file',\n help='A file with a single primer sequences per \\\n line. NOTE: it is recommended that the input \\\n sequences are trimmed for adapters and primers prior \\\n to running this program. I do not guarantee a \\\n desirable or predictable outcome with the -p \\\n option.')\n\n parser.add_argument('-polyA', '--polyA', action=\"store_true\",\n help='Runs the algorithm to measure poly(A) tails in \\\n a FASTA file. Requires -p if sequences contain a 3\\' \\\n adapter or primer. Will include poly(A) tail \\\n measurement, k-mer counting in the 50 nt upstream of \\\n of the tail, identification of a putative PAS, and \\\n generation of a PSSM for the entire input file.')\n\n parser.add_argument('-l', '--length_file',\n help='A file with an isoform ID and poly(A) tail \\\n length on each line to be used for k-mer counting \\\n and PAS identification. This file can be generated \\\n with -polyA.')\n\n parser.add_argument('-k', '--k', default=6, type=int,\n help='An integer, the kmer length (defaults to 6).')\n\n parser.add_argument('-n', '--n', default=10, type=int,\n help='An integer, the number of k-mers to pipe from \\\n the k-mer search to the PAS search (defaults to 10).')\n\n parser.add_argument('-PAS', '--pas_file',\n help='Identify the most likely PAS; requires a file \\\n with one k-mer per line. NOTE: the algorithm will \\\n search for each k-mer one at a time and stop the \\\n search once it has found one. Thus, it is HIGHLY \\\n recommended that this file is ordered in descending \\\n PAS/k-mer usage.')\n\n parser.add_argument('-pssm', '--pssm',\n help='Generate a position-specific score matrix for \\\n the region flanking the PAS. 
Requires a CSV file \\\n (generated by -polyA) with column names Isoform, 3\\' \\\n Sequence, kmer, and Count.')\n\n args = parser.parse_args()\n return args\n\n\ndef main():\n\n args = getargs()\n\n if args.polyA:\n if args.primer_file:\n print(\"Primer file provided; trimming reads.\")\n trim = primertrim(\n args.input_file, args.primer_file, args.output_directory)\n print(\"Measuring poly(A) tails.\")\n lengths = measurepolya(\n trim, args.output_directory, args.input_file)\n print(\"Counting all k-mers in the 50 nt upstream of the start of the poly(A); retaining the %s most abundant.\" % args.n)\n kmer = findkmer(trim, lengths, args.output_directory,\n args.k, args.n, args.input_file)\n print(\"Searching for the most likely PAS.\")\n pas = findpas(trim, lengths, kmer,\n args.output_directory, args.input_file)\n print(\"Generating a position-specific score matrix for all transcripts.\")\n pssm(pas, args.output_directory, args.input_file)\n elif not args.primer_file:\n print(\"Primer file not provided; assuming trimmed reads.\")\n print(\"Measuring poly(A) tails.\")\n lengths = measurepolya(\n args.input_file, args.output_directory,\n args.input_file)\n print(\"Counting all k-mers in the 50 nt upstream of the start of the poly(A); retaining the %s most abundant.\" % args.n)\n kmer = findkmer(args.input_file, lengths, args.output_directory,\n args.k, args.n, args.input_file)\n print(\"Searching for the most likely PAS.\")\n pas = findpas(args.input_file, lengths, kmer,\n args.output_directory, args.input_file)\n print(\"Generating a position-specific score matrix for all transcripts.\")\n pssm(pas, args.output_directory, args.input_file)\n elif args.pssm:\n print(\"Generating a position-specific score matrix for provided transcripts.\")\n pssm(args.pssm, args.output_directory, args.input_file)\n\nmain()\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
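The PAS-search script in the record above walks a ranked k-mer list and keeps only the first hit per read, so the ordering of the k-mer file directly determines which PAS is reported. The helper below is a minimal sketch of that first-hit logic on a plain upstream string; the function name and the example sequence are illustrative and not taken from the source file (the original also offsets the index by -5 and records "NA" when nothing is found).

def first_pas_hit(upstream_seq, ranked_kmers):
    """Return (kmer, index) for the first ranked k-mer found, else (None, None)."""
    for kmer in ranked_kmers:
        index = upstream_seq.find(kmer)
        if index != -1:
            return kmer, index
    return None, None

seq = "GCGAATAAACTTGTCCTTGTAGGCTTTGTACAGGAATTCAAGCGTGACAT"
print(first_pas_hit(seq, ["AATAAA", "ATTAAA", "AGTAAA"]))  # ('AATAAA', 3)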
antorsae/fMoW | [
"39c99dad6045282f89593803baa8c9457cbf3ec0"
] | [
"code/data_ml_functions/dataFunctions.py"
] | [
"\"\"\"\r\nCopyright 2017 The Johns Hopkins University Applied Physics Laboratory LLC\r\nand Andres Torrubia\r\nAll rights reserved.\r\n\r\nLicensed under the Apache License, Version 2.0 (the \"License\");\r\nyou may not use this file except in compliance with the License.\r\nYou may obtain a copy of the License at\r\n\r\nhttp://www.apache.org/licenses/LICENSE-2.0\r\n\r\nUnless required by applicable law or agreed to in writing, software\r\ndistributed under the License is distributed on an \"AS IS\" BASIS,\r\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\nSee the License for the specific language governing permissions and\r\nlimitations under the License.\r\n\"\"\"\r\n\r\n__author__ = 'jhuapl, antor'\r\n__version__ = 0.1\r\n\r\nimport json\r\nimport os\r\nimport errno\r\nimport numpy as np\r\nimport string\r\nimport dateutil.parser as dparser\r\nfrom PIL import Image\r\nfrom sklearn.utils import class_weight\r\nfrom keras.preprocessing import image\r\nfrom concurrent.futures import ThreadPoolExecutor\r\nfrom functools import partial\r\nfrom tqdm import tqdm\r\nimport warnings\r\n\r\nimport code\r\n#from iterm import show_image\r\nimport math\r\nimport scipy.misc\r\nimport cv2\r\n\r\ndef prepare_data(params):\r\n \"\"\"\r\n Saves sub images, converts metadata to feature vectors and saves in JSON files, \r\n calculates dataset statistics, and keeps track of saved files so they can be loaded as batches\r\n while training the CNN.\r\n :param params: global parameters, used to find location of the dataset and json file\r\n :return: \r\n \"\"\"\r\n\r\n # suppress decompression bomb warnings for Pillow\r\n warnings.simplefilter('ignore', Image.DecompressionBombWarning)\r\n\r\n walkDirs = ['train', 'val', 'test']\r\n\r\n executor = ThreadPoolExecutor(max_workers=params.num_workers)\r\n futures = []\r\n paramsDict = vars(params)\r\n keysToKeep = ['image_format_dataset', 'image_format_processed', 'target_img_size', 'metadata_length', 'category_names', 'context_factor']\r\n paramsDict = {keepKey: paramsDict[keepKey] for keepKey in keysToKeep}\r\n \r\n results = []\r\n for currDir in walkDirs:\r\n isTrain = (currDir == 'train') or (currDir == 'val')\r\n if isTrain:\r\n outDir = params.directories['train_data']\r\n else:\r\n outDir = params.directories['test_data']\r\n\r\n print('Looping through sequences in: ' + currDir)\r\n for it, (root, dirs, files) in enumerate(tqdm(os.walk(os.path.join(params.directories['dataset'], currDir)))):\r\n if len(files) > 0:\r\n slashes = [i for i,ltr in enumerate(root) if ltr == '/']\r\n \r\n for file in files:\r\n if file.endswith('_'+ params.args.img_suffix + '.json'): # _msrgb or _rgb images\r\n task = partial(_process_file, file, slashes, root, isTrain, outDir, params)\r\n futures.append(executor.submit(task))\r\n\r\n print('Preprocessing all files...')\r\n results = []\r\n [results.extend(future.result()) for future in tqdm(futures)]\r\n allTrainFeatures = [np.array(r[0]) for r in results if r[0] is not None]\r\n \r\n metadataTrainSum = np.zeros(params.metadata_length)\r\n for features in allTrainFeatures:\r\n metadataTrainSum += features\r\n\r\n trainingData = [r[1] for r in results if r[1] is not None]\r\n trainCount = len(trainingData)\r\n testData = [r[2] for r in results if r[2] is not None]\r\n\r\n # Shutdown the executor and free resources \r\n print('Computing stats...')\r\n executor.shutdown()\r\n\r\n metadataMean = metadataTrainSum / trainCount\r\n metadataMax = np.zeros(params.metadata_length)\r\n for currFeat in 
allTrainFeatures:\r\n currFeat = currFeat - metadataMean\r\n for i in range(params.metadata_length):\r\n if abs(currFeat[i]) > metadataMax[i]:\r\n metadataMax[i] = abs(currFeat[i])\r\n for i in range(params.metadata_length):\r\n if metadataMax[i] == 0:\r\n metadataMax[i] = 1.0\r\n metadataStats = {}\r\n metadataStats['metadata_mean'] = metadataMean.tolist()\r\n metadataStats['metadata_max'] = metadataMax.tolist()\r\n json.dump(testData, open(params.files['test_struct'], 'w'))\r\n json.dump(trainingData, open(params.files['training_struct'], 'w'))\r\n json.dump(metadataStats, open(params.files['dataset_stats'], 'w'))\r\n\r\ndef _process_file(file, slashes, root, isTrain, outDir, params):\r\n \"\"\"\r\n Helper for prepare_data that actually loads and resizes each image and computes\r\n feature vectors. This function is designed to be called in parallel for each file\r\n :param file: file to process\r\n :param slashes: location of slashes from root walk path\r\n :param root: root walk path\r\n :param isTrain: flag on whether or not the current file is from the train set\r\n :param outDir: output directory for processed data\r\n :param params: dict of the global parameters with only the necessary fields\r\n :return (allFeatures, allTrainResults, allTestResults)\r\n \"\"\"\r\n noResult = [(None, None, None)]\r\n baseName = file[:-5]\r\n\r\n imgFile = baseName + '.' + params.image_format_dataset\r\n \r\n if not os.path.isfile(os.path.join(root, imgFile)):\r\n print(os.path.join(root, imgFile))\r\n return noResult\r\n\r\n jsonData = json.load(open(os.path.join(root, file)))\r\n if not isinstance(jsonData['bounding_boxes'], list):\r\n jsonData['bounding_boxes'] = [jsonData['bounding_boxes']]\r\n\r\n allResults = []\r\n img = None\r\n for bb in jsonData['bounding_boxes']:\r\n if isTrain:\r\n category = bb['category']\r\n box = bb['box']\r\n\r\n outBaseName = '%d' % bb['ID']\r\n if isTrain:\r\n outBaseName = ('%s_' % category) + outBaseName\r\n\r\n if isTrain:\r\n currOut = os.path.join(outDir, root[slashes[-3] + 1:], outBaseName)\r\n else:\r\n currOut = os.path.join(outDir, root[slashes[-2] + 1:], outBaseName)\r\n\r\n if not os.path.isdir(currOut):\r\n try:\r\n os.makedirs(currOut)\r\n except OSError as e:\r\n if e.errno == errno.EEXIST:\r\n pass\r\n\r\n featuresPath = os.path.join(currOut, baseName + '_features.json')\r\n imgPath = os.path.join(currOut, baseName + '.' 
+ params.image_format_processed)\r\n\r\n if not os.path.isfile(imgPath):\r\n\r\n if img is None:\r\n try:\r\n img = scipy.misc.imread(os.path.join(root, imgFile))\r\n except:\r\n print(os.path.join(root, imgFile))\r\n return noResult\r\n\r\n if False:\r\n # fixed context\r\n\r\n x_size, y_size = box[2], box[3]\r\n x0, y0 = box[0], box[1]\r\n x1, y1 = min(x0 + x_size, img.shape[1]-1), min(y0 + y_size, img.shape[0]-1)\r\n\r\n x_side, y_side = x_size /2 , y_size /2\r\n\r\n # don't train on tiny boxes\r\n if x_size <= 2 or y_size <= 2:\r\n print(\"Tiny box @ \" + file)\r\n #continue\r\n\r\n x_center = x0 + x_side\r\n y_center = y0 + y_side\r\n\r\n _x0 = np.clip(x_center - x_side * params.context_factor, 0, img.shape[1]-1)\r\n _x1 = np.clip(x_center + x_side * params.context_factor, 0, img.shape[1]-1)\r\n _y0 = np.clip(y_center - y_side * params.context_factor, 0, img.shape[0]-1)\r\n _y1 = np.clip(y_center + y_side * params.context_factor, 0, img.shape[0]-1)\r\n else:\r\n # variable context\r\n # \r\n # basefile strategy, see https://arxiv.org/pdf/1711.07846.pdf\r\n # ie: 'We found that it was useful to provide more context for categories\r\n # with smaller sizes (e.g., single-unit residential) and\r\n # less context for categories that generally cover larger areas\r\n # (e.g., airports).' (page 7)\r\n\r\n if box[2] <= 2 or box[3] <= 2:\r\n print(\"Tiny box @ \" + file)\r\n #continue\r\n \r\n contextMultWidth = 0.15\r\n contextMultHeight = 0.15\r\n \r\n wRatio = float(box[2]) / img.shape[0]\r\n hRatio = float(box[3]) / img.shape[1]\r\n \r\n if wRatio < 0.5 and wRatio >= 0.4:\r\n contextMultWidth = 0.2\r\n if wRatio < 0.4 and wRatio >= 0.3:\r\n contextMultWidth = 0.3\r\n if wRatio < 0.3 and wRatio >= 0.2:\r\n contextMultWidth = 0.5\r\n if wRatio < 0.2 and wRatio >= 0.1:\r\n contextMultWidth = 1\r\n if wRatio < 0.1:\r\n contextMultWidth = 2\r\n \r\n if hRatio < 0.5 and hRatio >= 0.4:\r\n contextMultHeight = 0.2\r\n if hRatio < 0.4 and hRatio >= 0.3:\r\n contextMultHeight = 0.3\r\n if hRatio < 0.3 and hRatio >= 0.2:\r\n contextMultHeight = 0.5\r\n if hRatio < 0.2 and hRatio >= 0.1:\r\n contextMultHeight = 1\r\n if hRatio < 0.1:\r\n contextMultHeight = 2\r\n \r\n \r\n widthBuffer = int((box[2] * contextMultWidth) / 2.0)\r\n heightBuffer = int((box[3] * contextMultHeight) / 2.0)\r\n\r\n r1 = box[1] - heightBuffer\r\n r2 = box[1] + box[3] + heightBuffer\r\n c1 = box[0] - widthBuffer\r\n c2 = box[0] + box[2] + widthBuffer\r\n\r\n if r1 < 0:\r\n r1 = 0\r\n if r2 > img.shape[0]:\r\n r2 = img.shape[0]\r\n if c1 < 0:\r\n c1 = 0\r\n if c2 > img.shape[1]:\r\n c2 = img.shape[1]\r\n\r\n if r1 >= r2 or c1 >= c2:\r\n print(\"Inconsistent dimensions @ \" + file)\r\n continue\r\n\r\n _x0, _x1 = c1, c2\r\n _y0, _y1 = r1, r2\r\n\r\n # take 3 points and leave sqrt(2) * side so that rotating the patch around center\r\n # always has valid pixels in the center params.target_img_size square\r\n src_points = np.float32([[_x0,_y0], [_x1, _y0], [_x1, _y1]])\r\n sq2 = 1.4142135624 \r\n patch_size = params.target_img_size * (sq2 + params.offset + params.zoom)\r\n patch_center = patch_size / 2\r\n patch_crop = params.target_img_size / 2 \r\n dst_points = np.float32((\r\n [ patch_center - patch_crop , patch_center - patch_crop ], \r\n [ patch_center + patch_crop , patch_center - patch_crop ], \r\n [ patch_center + patch_crop , patch_center + patch_crop ])) \r\n\r\n M = cv2.getAffineTransform(src_points, dst_points)\r\n patch_size_int = int(math.ceil(patch_size))\r\n _img = cv2.warpAffine(img, M, (patch_size_int, 
patch_size_int), borderMode = cv2.BORDER_REFLECT_101).astype(np.float32)\r\n\r\n if False:\r\n show_image(_img)\r\n print(category)\r\n raw_input(\"Press it now\")\r\n\r\n scipy.misc.imsave(imgPath, _img)\r\n\r\n features = json_to_feature_vector(params, jsonData, bb)\r\n features = features.tolist()\r\n\r\n json.dump(features, open(featuresPath, 'w'))\r\n\r\n if isTrain:\r\n allResults.append((features, {\"features_path\": featuresPath, \"img_path\": imgPath, \"category\": params.category_names.index(category)}, None))\r\n else:\r\n allResults.append((None, None, {\"features_path\": featuresPath, \"img_path\": imgPath}))\r\n\r\n return allResults\r\n\r\ndef json_to_feature_vector(params, jsonData, bb):\r\n features = np.zeros(params.metadata_length, dtype=float)\r\n features[0] = float(jsonData['gsd'])\r\n x,y = utm_to_xy(jsonData['utm'])\r\n features[1] = x\r\n features[2] = y\r\n features[3] = float(jsonData['cloud_cover']) / 100.0\r\n date = dparser.parse(jsonData['timestamp'])\r\n features[4] = float(date.year)\r\n features[5] = float(date.month) / 12.0\r\n features[6] = float(date.day) / 31.0\r\n features[7] = float(date.hour) + float(date.minute)/60.0\r\n\r\n if jsonData['scan_direction'].lower() == 'forward':\r\n features[8] = 0.0\r\n else:\r\n features[8] = 1.0\r\n features[9] = float(jsonData['pan_resolution_dbl'])\r\n features[10] = float(jsonData['pan_resolution_start_dbl'])\r\n features[11] = float(jsonData['pan_resolution_end_dbl'])\r\n features[12] = float(jsonData['pan_resolution_min_dbl'])\r\n features[13] = float(jsonData['pan_resolution_max_dbl'])\r\n features[14] = float(jsonData['multi_resolution_dbl'])\r\n features[15] = float(jsonData['multi_resolution_min_dbl'])\r\n features[16] = float(jsonData['multi_resolution_max_dbl'])\r\n features[17] = float(jsonData['multi_resolution_start_dbl'])\r\n features[18] = float(jsonData['multi_resolution_end_dbl'])\r\n features[19] = float(jsonData['target_azimuth_dbl']) / 360.0\r\n features[20] = float(jsonData['target_azimuth_min_dbl']) / 360.0\r\n features[21] = float(jsonData['target_azimuth_max_dbl']) / 360.0\r\n features[22] = float(jsonData['target_azimuth_start_dbl']) / 360.0\r\n features[23] = float(jsonData['target_azimuth_end_dbl']) / 360.0\r\n features[24] = float(jsonData['sun_azimuth_dbl']) / 360.0\r\n features[25] = float(jsonData['sun_azimuth_min_dbl']) / 360.0\r\n features[26] = float(jsonData['sun_azimuth_max_dbl']) / 360.0\r\n features[27] = float(jsonData['sun_elevation_min_dbl']) / 90.0\r\n features[28] = float(jsonData['sun_elevation_dbl']) / 90.0\r\n features[29] = float(jsonData['sun_elevation_max_dbl']) / 90.0\r\n features[30] = float(jsonData['off_nadir_angle_dbl']) / 90.0\r\n features[31] = float(jsonData['off_nadir_angle_min_dbl']) / 90.0\r\n features[32] = float(jsonData['off_nadir_angle_max_dbl']) / 90.0\r\n features[33] = float(jsonData['off_nadir_angle_start_dbl']) / 90.0\r\n features[34] = float(jsonData['off_nadir_angle_end_dbl']) / 90.0\r\n features[35] = float(bb['box'][2])\r\n features[36] = float(bb['box'][3])\r\n features[37] = float(jsonData['img_width'])\r\n features[38] = float(jsonData['img_height'])\r\n features[39] = float(date.weekday())\r\n features[40] = min([features[35], features[36]]) / max([features[37], features[38]])\r\n features[41] = features[35] / features[37]\r\n features[42] = features[36] / features[38]\r\n features[43] = date.second\r\n if len(jsonData['bounding_boxes']) == 1:\r\n features[44] = 1.0\r\n else:\r\n features[44] = 0.0\r\n \r\n return features\r\n\r\n\r\ndef 
flip_axis(x, axis):\r\n x = np.asarray(x).swapaxes(axis, 0)\r\n x = x[::-1, ...]\r\n x = x.swapaxes(0, axis)\r\n return x\r\n \r\ndef utm_to_xy(zone):\r\n \"\"\"\r\n Converts UTM zone to x,y values between 0 and 1.\r\n :param zone: UTM zone (string)\r\n :return (x,y): values between 0 and 1\r\n \"\"\"\r\n nums = range(1,61);\r\n letters = string.ascii_lowercase[2:-2]\r\n if len(zone) == 2:\r\n num = int(zone[0:1])\r\n else:\r\n num = int(zone[0:2])\r\n letter = zone[-1].lower()\r\n numIndex = nums.index(num)\r\n letterIndex = letters.index(letter)\r\n x = float(numIndex) / float(len(nums)-1)\r\n y = float(letterIndex) / float(len(letters)-1)\r\n return (x,y)\r\n\r\ndef get_batch_inds(batch_size, idx, N):\r\n \"\"\"\r\n Generates an array of indices of length N\r\n :param batch_size: the size of training batches\r\n :param idx: data to split into batches\r\n :param N: Maximum size\r\n :return batchInds: list of arrays of data of length batch_size\r\n \"\"\"\r\n batchInds = []\r\n idx0 = 0\r\n\r\n toProcess = True\r\n while toProcess:\r\n idx1 = idx0 + batch_size\r\n if idx1 > N:\r\n idx1 = N\r\n idx0 = idx1 - batch_size\r\n toProcess = False\r\n batchInds.append(idx[idx0:idx1])\r\n idx0 = idx1\r\n\r\n return batchInds\r\n\r\ndef calculate_class_weights(params):\r\n \"\"\"\r\n Computes the class weights for the training data and writes out to a json file \r\n :param params: global parameters, used to find location of the dataset and json file\r\n :return: \r\n \"\"\"\r\n \r\n counts = {}\r\n for i in range(0,params.num_labels):\r\n counts[i] = 0\r\n\r\n trainingData = json.load(open(params.files['training_struct']))\r\n\r\n ytrain = []\r\n for i,currData in enumerate(trainingData):\r\n ytrain.append(currData['category'])\r\n counts[currData['category']] += 1\r\n\r\n classWeights = class_weight.compute_class_weight('balanced', np.unique(ytrain), np.array(ytrain))\r\n\r\n with open(params.files['class_weight'], 'w') as json_file:\r\n json.dump(classWeights.tolist(), json_file)\r\n"
] | [
[
"numpy.unique",
"numpy.asarray",
"numpy.clip",
"numpy.float32",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
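prepare_data in the record above normalises the metadata feature vectors with two statistics that it writes to params.files['dataset_stats']: the per-feature mean and the per-feature maximum absolute deviation from that mean, with zeros replaced by 1.0 so later division stays safe. A roughly equivalent vectorised sketch (an assumption, not the file's loop-based code), given the training feature vectors already collected into equal-length rows:

import numpy as np

def metadata_stats(all_train_features):
    X = np.asarray(all_train_features, dtype=float)
    mean = X.mean(axis=0)                      # per-feature mean
    max_abs = np.abs(X - mean).max(axis=0)     # per-feature max absolute deviation
    max_abs[max_abs == 0] = 1.0                # avoid division by zero downstream
    return {'metadata_mean': mean.tolist(), 'metadata_max': max_abs.tolist()}

stats = metadata_stats([[0.3, 10.0, 5.0], [0.5, 12.0, 5.0], [0.7, 14.0, 5.0]])
print(stats['metadata_mean'])  # ~[0.5, 12.0, 5.0]
print(stats['metadata_max'])   # ~[0.2, 2.0, 1.0] (zero deviation replaced by 1.0)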
ludwigschwardt/katbeam | [
"9e4dbb4c1218b4b33f046a853d80dcc42c447855"
] | [
"tests/test_jimbeam.py"
] | [
"import pytest\nimport numpy as np\nimport matplotlib\n# Enforce a non-interactive Matplotlib backend\nmatplotlib.use('agg')\nimport matplotlib.pylab as plt # noqa: E402\n\nfrom katbeam import JimBeam # noqa: E402\n\n\ndef test_unknown_model_name():\n with pytest.raises(ValueError):\n JimBeam('MKAT-AA-UHF-JIM-2012')\n\n\[email protected](\n 'name,pol,x,y,freqMHz,value',\n [\n ('MKAT-AA-UHF-JIM-2020', 'HH', 0, 0, 800, 1.0),\n ('MKAT-AA-L-JIM-2020', 'HH', 0, 0, 1420, 0.999774),\n ('MKAT-AA-UHF-JIM-2020', 'VV', 0, 1, 800, 0.6600966),\n ('MKAT-AA-L-JIM-2020', 'VV', 0, 1, 1420, 0.2062726),\n ('MKAT-AA-UHF-JIM-2020', 'I', 1, 0, 800, 0.4077328),\n ('MKAT-AA-L-JIM-2020', 'I', 1, 0, 1420, 0.02575332),\n ]\n)\ndef test_sample_beam_values(name, pol, x, y, freqMHz, value):\n beam = JimBeam(name)\n pattern = getattr(beam, pol)\n assert pattern(x, y, freqMHz) == pytest.approx(value)\n\n\ndef showbeam(beam, freqMHz=1000, pol='HH', beamextent=10.):\n margin = np.linspace(-beamextent / 2., beamextent / 2., 128)\n x, y = np.meshgrid(margin, margin)\n pattern = getattr(beam, pol)\n beampixels = pattern(x, y, freqMHz)\n fig, ax = plt.subplots()\n ax.imshow(beampixels, extent=[-beamextent / 2., beamextent / 2.,\n -beamextent / 2., beamextent / 2.])\n ax.set_title('{} pol beam\\nfor {} at {:d}MHz'.format(pol, beam.name, freqMHz))\n return fig\n\n\[email protected]_image_compare(remove_text=True, filename='UHF_800_HH_10.png')\ndef test_UHF_beam_image():\n beam = JimBeam('MKAT-AA-UHF-JIM-2020')\n return showbeam(beam, 800, 'HH', 10.)\n\n\[email protected]_image_compare(remove_text=True, filename='L_1420_VV_5.png')\ndef test_L_beam_image():\n beam = JimBeam('MKAT-AA-L-JIM-2020')\n return showbeam(beam, 1420, 'VV', 5.)\n"
] | [
[
"matplotlib.use",
"numpy.meshgrid",
"matplotlib.pylab.subplots",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
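The katbeam tests above exercise JimBeam in two ways: scalar samples via pattern(x, y, freqMHz) and a meshgrid of offsets rendered as an image. A small usage sketch based only on those calls, assuming katbeam is installed:

import numpy as np
from katbeam import JimBeam

beam = JimBeam('MKAT-AA-L-JIM-2020')
az = np.linspace(-2.5, 2.5, 5)    # offsets from boresight (degrees, as in beamextent)
el = np.linspace(-2.5, 2.5, 5)
x, y = np.meshgrid(az, el)
print(beam.HH(0, 0, 1420))        # boresight gain, close to 1.0
print(beam.HH(x, y, 1420).shape)  # (5, 5) grid of beam gains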
dalonlobo/textsplit | [
"edbdd7b640b71f777065e312eccd684cc21fcee6"
] | [
"tests/test_textsplit.py"
] | [
"import unittest\nimport numpy as np\nfrom ..algorithm import split_greedy, split_optimal, get_total, get_gains\nfrom ..tools import get_penalty, P_k\n\nDIM = 20\n\ndef getDoc(segment_len, n_seg):\n return np.vstack([np.tile(w, (segment_len, 1))\n for w in np.random.random((n_seg, DIM))])\n\n\ndocA = getDoc(20, 10)\npenaltyA = get_penalty([docA], 20) # get_penalty is deterministic here\n\nclass TestTextSplit(unittest.TestCase):\n\n def test_get_penalty(self):\n seg = split_greedy(docA, penalty=penaltyA)\n self.assertEqual(len(seg.splits), 9)\n\n def test_split_greedy_penalty(self):\n seg = split_greedy(docA, penalty=penaltyA)\n self.assertEqual(len(seg.splits), len(seg.gains))\n self.assertGreater(np.percentile(seg.gains, 25), penaltyA)\n gains2 = get_gains(docA, seg.splits)\n self.assertTrue(all(np.isclose(seg.gains, gains2)))\n\n def test_split_greedy_max_splits(self):\n seg = split_greedy(docA, max_splits=5)\n self.assertEqual(len(seg.splits), len(seg.gains))\n self.assertTrue(len(seg.splits) == len(seg.gains) == 5)\n\n def test_split_greedy_penalty_max_splits(self):\n seg = split_greedy(docA, penalty=penaltyA, max_splits=5)\n self.assertEqual(len(seg.splits), len(seg.gains))\n self.assertEqual(len(seg.splits), 5)\n self.assertGreater(np.percentile(seg.gains, 25), penaltyA)\n\n def test_split_optimal(self):\n seg = split_optimal(docA, penalty=penaltyA)\n self.assertEqual(len(seg.splits), len(seg.gains))\n print(len(seg.splits))\n self.assertGreater(np.min(seg.gains) + 0.00001, penaltyA)\n\n def test_split_optimal_vs_greedy(self):\n docs = [np.random.random((100, DIM)) for _ in range(100)]\n penalty = get_penalty(docs, 10)\n for i, doc in enumerate(docs):\n seg_o = split_optimal(doc, penalty=penalty)\n seg_g = split_greedy(doc, penalty=penalty)\n self.assertAlmostEqual(seg_o.total, get_total(doc, seg_o.splits, penalty), places=3)\n self.assertAlmostEqual(seg_g.total, get_total(doc, seg_g.splits, penalty), places=3)\n self.assertGreaterEqual(seg_o.total + 0.001, seg_g.total)\n\n def test_split_optimal_with_seg_limit(self):\n docs = [np.random.random((100, DIM)) for _ in range(10)]\n penalty = get_penalty(docs, 20)\n for i, doc in enumerate(docs):\n seg = split_optimal(doc, penalty=penalty)\n cuts = [0] + seg.splits + [100]\n seg2 = split_optimal(\n doc, penalty=penalty, seg_limit=np.diff(cuts).max()+1)\n self.assertTrue(seg2.optimal)\n self.assertEqual(seg.splits, seg2.splits)\n self.assertAlmostEqual(seg.total, seg2.total)\n\n def test_P_k(self):\n docs = [np.random.random((100, DIM)) for _ in range(10)]\n penalty = get_penalty(docs, 10)\n for i, doc in enumerate(docs):\n seg_o = split_optimal(doc, penalty=penalty)\n seg_g = split_greedy(doc, penalty=penalty)\n pk = P_k(seg_o.splits, seg_g.splits, len(doc))\n self.assertGreaterEqual(pk, 0)\n self.assertGreaterEqual(1, pk)\n"
] | [
[
"numpy.random.random",
"numpy.min",
"numpy.tile",
"numpy.percentile",
"numpy.diff",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
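The textsplit tests above establish the intended workflow: embed a document as an (n_sentences x dim) matrix, derive a penalty for a target segment length with get_penalty, then segment with split_greedy or split_optimal and compare totals. A short sketch of that flow, assuming the repository is importable as the textsplit package (as the relative ..algorithm / ..tools imports suggest):

import numpy as np
from textsplit.algorithm import split_greedy, split_optimal
from textsplit.tools import get_penalty

# synthetic document: 10 "topics", each repeated for 20 sentence vectors
doc = np.vstack([np.tile(w, (20, 1)) for w in np.random.random((10, 16))])
penalty = get_penalty([doc], 20)              # target segment length of 20
seg_g = split_greedy(doc, penalty=penalty)
seg_o = split_optimal(doc, penalty=penalty)
print(seg_g.splits, seg_g.total)
print(seg_o.splits, seg_o.total)              # optimal total >= greedy total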
MarcoMiretti/scikit-fuzzy | [
"4c3aebd4f187887e6ffe04f35bfbaf4d42da4491"
] | [
"skfuzzy/fuzzymath/fuzzy_ops.py"
] | [
"\"\"\"\nfuzzy_ops.py : Package of general operations on fuzzy sets, fuzzy membership\n functions, and their associated universe variables.\n\"\"\"\n\nfrom __future__ import division, print_function\nimport numpy as np\n\n\ndef cartadd(x, y):\n \"\"\"\n Cartesian addition of fuzzy membership vectors using the algebraic method.\n\n Parameters\n ----------\n x : 1D array or iterable\n First fuzzy membership vector, of length M.\n y : 1D array or iterable\n Second fuzzy membership vector, of length N.\n\n Returns\n -------\n z : 2D array\n Cartesian addition of ``x`` and ``y``, of shape (M, N).\n\n \"\"\"\n # Ensure rank-1 input\n x, y = np.asarray(x).ravel(), np.asarray(y).ravel()\n b, a = np.meshgrid(y, x, sparse=True)\n return a + b\n\n\ndef cartprod(x, y):\n \"\"\"\n Cartesian product of two fuzzy membership vectors. Uses ``min()``.\n\n Parameters\n ----------\n x : 1D array or iterable\n First fuzzy membership vector, of length M.\n y : 1D array or iterable\n Second fuzzy membership vector, of length N.\n\n Returns\n -------\n z : 2D array\n Cartesian product of ``x`` and ``y``, of shape (M, N).\n\n \"\"\"\n # Ensure rank-1 input\n x, y = np.asarray(x).ravel(), np.asarray(y).ravel()\n b, a = np.meshgrid(y, x, sparse=True)\n return np.fmin(a, b)\n\n\ndef classic_relation(a, b):\n \"\"\"\n Determine the classic relation matrix, ``R``, between two fuzzy sets.\n\n Parameters\n ----------\n a : 1D array or iterable\n First fuzzy membership vector, of length M.\n b : 1D array or iterable\n Second fuzzy membership vector, of length N.\n\n Returns\n -------\n R : 2D array\n Classic relation matrix between ``a`` and ``b``, shape (M, N)\n\n Notes\n -----\n The classic relation is defined as::\n\n r = [a x b] U [(1 - a) x ones(1, N)],\n\n where ``x`` represents a cartesian product and ``N`` is len(``b``).\n\n \"\"\"\n a = np.asarray(a)\n return np.fmax(cartprod(a, b), cartprod(1 - a, np.ones_like(b)))\n\n\ndef contrast(arr, amount=0.2, split=0.5, normalize=True):\n \"\"\"\n General contrast booster or diffuser of normalized array-like data.\n\n Parameters\n ----------\n arr : ndarray\n Input array (of floats on range [0, 1] if ``normalize=False``). If\n values exist outside this range, with ``normalize=True`` the image\n will be normalized for calculation.\n amount : float or length-2 iterable of floats\n Controls the exponential contrast mechanism for values above and below\n ``split`` in ``I``. If positive, the curve provides added contrast;\n if negative, the curve provides reduced contrast.\n\n If provided as a lenth-2 iterable of floats, they control the regions\n (below, above) ``split`` separately.\n split : float\n Positive scalar, on range [0, 1], determining the midpoint of the\n exponential contrast. 
Default of 0.5 is reasonable for well-exposed\n images.\n normalize : bool, default True\n Controls normalization to the range [0, 1].\n\n Returns\n -------\n focused : ndarray\n Contrast adjusted, normalized, floating-point image on range [0, 1].\n\n Notes\n -----\n The result of this algorithm is like applying a Curves adjustment in the\n GIMP or Photoshop.\n\n Algorithm for curves adjustment at a given pixel, x, is given by::\n\n | split * (x/split)^below, 0 <= x <= split\n y(x) = |\n | 1 - (1-split) * ((1-x) / (1-split))^above, split < x <= 1.0\n\n See Also\n --------\n skfuzzy.fuzzymath.sigmoid\n\n \"\"\"\n # Ensure scalars are floats, to avoid truncating division in Python 2.x\n split = float(split)\n im = arr.astype(float)\n amount_ = np.asarray(amount, dtype=np.float64).ravel()\n\n if len(amount_) == 1:\n # One argument -> Equal amount applied on either side of `split`\n above = below = amount_[0]\n else:\n # Two arguments -> Control contrast separately in light/dark regions\n below = amount_[0]\n above = amount_[1]\n\n # Normalize if required\n if im.max() > 1. and normalize is True:\n ma = float(im.max())\n im /= float(im.max())\n else:\n ma = 1.\n\n focused = np.zeros_like(im, dtype=np.float64)\n\n # Simplified array-wise algorithm using fancy indexing rather than looping\n focused[im <= split] = split * (im[im <= split] / split) ** below\n focused[im > split] = (1 - (1. - split) *\n ((1 - im[im > split]) / (1. - split)) ** above)\n\n # Reapply multiplicative factor\n return focused * ma\n\n\ndef fuzzy_op(x, a, y, b, op):\n \"\"\"Operation of two fuzzy sets.\n \n Operate fuzzy set ``a`` with fuzzy set ``b``,\n using +, * or any other binary operator.\n\n Parameters\n ----------\n x : 1d array, length N\n Universe variable for fuzzy set ``a``.\n a : 1d array, length N\n Fuzzy set for universe ``x``.\n y : 1d array, length M\n Universe variable for fuzzy set ``b``.\n b : 1d array, length M\n Fuzzy set for universe ``y``.\n op: Function, pointwise binary operator on two matrices\n (pointwise version of) +, -, *, /, min, max etc.\n\n Returns\n -------\n z : 1d array\n Output variable.\n mfz : 1d array\n Fuzzy membership set for variable ``z``.\n\n Notes\n -----\n Uses Zadeh's Extension Principle as described in Ross, Fuzzy Logic with\n Engineering Applications (2010), pp. 414, Eq. 12.17.\n\n If these results are unexpected and your membership functions are convex,\n consider trying the ``skfuzzy.dsw_*`` functions for fuzzy mathematics\n using interval arithmetic via the restricted Dong, Shah, and Wong method.\n\n \"\"\"\n # a and x, and b and y, are formed into (MxN) matrices. 
The former has\n # identical rows; the latter identical identical columns.\n\n yy, xx = np.meshgrid(y, x, sparse=True) # consider broadcasting rules\n bb, aa = np.meshgrid(b, a, sparse=True)\n\n # Do the operation\n zz = op(xx, yy).ravel()\n zz_index = np.argsort(zz)\n zz = np.sort(zz)\n\n # Array min() operation\n c = np.fmin(aa, bb).ravel()\n c = c[zz_index]\n\n # Initialize loop\n z, mfz = np.zeros(0), np.zeros(0)\n idx = 0\n\n for _ in range(len(c)):\n index = np.nonzero(zz == zz[idx])[0]\n z = np.hstack((z, zz[idx]))\n mfz = np.hstack((mfz, c[index].max()))\n idx = index[-1] + 1\n if idx >= len(zz):\n break\n\n return z, mfz\n\ndef fuzzy_add(x, a, y, b):\n \"\"\"\n Add fuzzy set ``a`` to fuzzy set ``b``.\n\n Parameters\n ----------\n x : 1d array, length N\n Universe variable for fuzzy set ``a``.\n a : 1d array, length N\n Fuzzy set for universe ``x``.\n y : 1d array, length M\n Universe variable for fuzzy set ``b``.\n b : 1d array, length M\n Fuzzy set for universe ``y``.\n\n Returns\n -------\n z : 1d array\n Output variable.\n mfz : 1d array\n Fuzzy membership set for variable ``z``.\n\n Notes\n -----\n Uses Zadeh's Extension Principle as described in Ross, Fuzzy Logic with\n Engineering Applications (2010), pp. 414, Eq. 12.17.\n\n If these results are unexpected and your membership functions are convex,\n consider trying the ``skfuzzy.dsw_*`` functions for fuzzy mathematics\n using interval arithmetic via the restricted Dong, Shah, and Wong method.\n\n \"\"\"\n return fuzzy_op(x, a, y, b, op=np.add)\n\n\ndef fuzzy_compare(q):\n \"\"\"\n Determine the comparison matrix, ``c``, based on the fuzzy pairwise\n comparison matrix, ``q``, using Shimura's special relativity formula.\n\n Parameters\n ----------\n q : 2d array, (N, N)\n Fuzzy pairwise comparison matrix.\n\n Returns\n -------\n c : 2d array, (N, N)\n Comparison matrix.\n\n \"\"\"\n return q.T / np.fmax(q, q.T).astype(np.float)\n\n\ndef fuzzy_div(x, a, y, b):\n \"\"\"\n Divide fuzzy set ``b`` into fuzzy set ``a``.\n\n Parameters\n ----------\n x : 1d array, length N\n Universe variable for fuzzy set ``a``.\n a : 1d array, length N\n Fuzzy set for universe ``x``.\n y : 1d array, length M (excluding zero array)\n Universe variable for fuzzy set ``b``.\n b : 1d array, length M\n Fuzzy set for universe ``y``.\n\n Returns\n -------\n z : 1d array\n Output variable.\n mfz : 1d array\n Fuzzy membership set for variable z.\n\n Notes\n -----\n Uses Zadeh's Extension Principle from Ross, Fuzzy Logic w/Engineering\n Applications, (2010), pp.414, Eq. 12.17.\n\n If these results are unexpected and your membership functions are convex,\n consider trying the ``skfuzzy.dsw_*`` functions for fuzzy mathematics\n using interval arithmetic via the restricted Dong, Shah, and Wong method.\n\n \"\"\"\n # a and x, and b and y, are formed into (MxN) matrices. 
The former has\n # identical rows; the latter identical identical columns.\n if np.all(np.asarray(y) == 0):\n Warning('The 0 value(s) will never be used in the calculation!')\n index = np.where(y == 0)[0]\n np.delete(y, index)\n np.delete(b, index)\n return fuzzy_op(x, a, y, b, op=np.divide)\n\n\ndef fuzzy_min(x, a, y, b):\n \"\"\"\n Find minimum between fuzzy set ``a`` fuzzy set ``b``.\n\n Parameters\n ----------\n x : 1d array, length N\n Universe variable for fuzzy set ``a``.\n a : 1d array, length N\n Fuzzy set for universe ``x``.\n y : 1d array, length M\n Universe variable for fuzzy set ``b``.\n b : 1d array, length M\n Fuzzy set for universe ``y``.\n\n Returns\n -------\n z : 1d array\n Output variable.\n mfz : 1d array\n Fuzzy membership set for variable z.\n\n Notes\n -----\n Uses Zadeh's Extension Principle from Ross, Fuzzy Logic w/Engineering\n Applications, (2010), pp.414, Eq. 12.17.\n\n If these results are unexpected and your membership functions are convex,\n consider trying the ``skfuzzy.dsw_*`` functions for fuzzy mathematics\n using interval arithmetic via the restricted Dong, Shah, and Wong method.\n\n \"\"\"\n return fuzzy_op(x, a, y, b, op=np.fmin)\n\n\ndef fuzzy_mult(x, a, y, b):\n \"\"\"\n Multiplies fuzzy set ``a`` and fuzzy set ``b``.\n\n Parameters\n ----------\n x : 1d array, length N\n Universe variable for fuzzy set ``a``.\n A : 1d array, length N\n Fuzzy set for universe ``x``.\n y : 1d array, length M\n Universe variable for fuzzy set ``b``.\n b : 1d array, length M\n Fuzzy set for universe ``y``.\n\n Returns\n -------\n z : 1d array\n Output variable.\n mfz : 1d array\n Fuzzy membership set for variable z.\n\n Notes\n -----\n Uses Zadeh's Extension Principle from Ross, Fuzzy Logic w/Engineering\n Applications, (2010), pp.414, Eq. 12.17.\n\n If these results are unexpected and your membership functions are convex,\n consider trying the ``skfuzzy.dsw_*`` functions for fuzzy mathematics\n using interval arithmetic via the restricted Dong, Shah, and Wong method.\n\n \"\"\"\n return fuzzy_op(x, a, y, b, op=np.multiply)\n\n\ndef fuzzy_sub(x, a, y, b):\n \"\"\"\n Subtract fuzzy set ``b`` from fuzzy set ``a``.\n\n Parameters\n ----------\n x : 1d array, length N\n Universe variable for fuzzy set ``a``.\n A : 1d array, length N\n Fuzzy set for universe ``x``.\n y : 1d array, length M\n Universe variable for fuzzy set ``b``.\n b : 1d array, length M\n Fuzzy set for universe ``y``.\n\n Returns\n -------\n z : 1d array\n Output variable.\n mfz : 1d array\n Fuzzy membership set for variable z.\n\n Notes\n -----\n Uses Zadeh's Extension Principle from Ross, Fuzzy Logic w/Engineering\n Applications, (2010), pp.414, Eq. 
12.17.\n\n If these results are unexpected and your membership functions are convex,\n consider trying the ``skfuzzy.dsw_*`` functions for fuzzy mathematics\n using interval arithmetic via the restricted Dong, Shah, and Wong method.\n\n \"\"\"\n return fuzzy_op(x, a, y, b, op=np.subtract)\n\n\ndef inner_product(a, b):\n \"\"\"\n Inner product (dot product) of two fuzzy sets.\n\n Parameters\n ----------\n a : 1d array or iterable\n Fuzzy membership function.\n b : 1d array or iterable\n Fuzzy membership function.\n\n Returns\n -------\n y : float\n Fuzzy inner product value, on range [0, 1]\n\n \"\"\"\n return np.max(np.fmin(np.r_[a], np.r_[b]))\n\n\ndef interp10(x):\n \"\"\"\n Utility function which conducts linear interpolation of any rank-1 array.\n Result will have 10x resolution.\n\n Parameters\n ----------\n x : 1d array, length N\n Input array to be interpolated.\n\n Returns\n -------\n y : 1d array, length 10 * N + 1\n Linearly interpolated output.\n\n \"\"\"\n L = len(x)\n return np.interp(np.r_[0:L - 0.9:0.1], range(L), x)\n\n\ndef maxmin_composition(s, r):\n \"\"\"\n The max-min composition ``t`` of two fuzzy relation matrices.\n\n Parameters\n ----------\n s : 2d array, (M, N)\n Fuzzy relation matrix #1.\n r : 2d array, (N, P)\n Fuzzy relation matrix #2.\n\n Returns\n -------\n T ; 2d array, (M, P)\n Max-min composition, defined by ``T = s o r``.\n\n \"\"\"\n if s.ndim < 2:\n s = np.atleast_2d(s)\n if r.ndim < 2:\n r = np.atleast_2d(r).T\n m = s.shape[0]\n p = r.shape[1]\n t = np.zeros((m, p))\n\n for pp in range(p):\n for mm in range(m):\n t[mm, pp] = (np.fmin(s[mm, :], r[:, pp].T)).max()\n\n return t\n\n\ndef maxprod_composition(s, r):\n \"\"\"\n The max-product composition ``t`` of two fuzzy relation matrices.\n\n Parameters\n ----------\n s : 2d array, (M, N)\n Fuzzy relation matrix #1.\n r : 2d array, (N, P)\n Fuzzy relation matrix #2.\n\n Returns\n -------\n t : 2d array, (M, P)\n Max-product composition matrix.\n\n \"\"\"\n if s.ndim < 2:\n s = np.atleast_2d(s)\n if r.ndim < 2:\n r = np.atleast_2d(r).T\n m = s.shape[0]\n p = r.shape[1]\n t = np.zeros((m, p))\n\n for mm in range(m):\n for pp in range(p):\n t[mm, pp] = (s[mm, :] * r[:, pp].T).max()\n\n return t\n\n\ndef interp_membership(x, xmf, xx, zero_outside_x=True):\n \"\"\"\n Find the degree of membership ``u(xx)`` for a given value of ``x = xx``.\n\n Parameters\n ----------\n x : 1d array\n Independent discrete variable vector.\n xmf : 1d array\n Fuzzy membership function for ``x``. Same length as ``x``.\n xx : float or array of floats\n Value(s) on universe ``x`` where the interpolated membership is\n desired.\n zero_outside_x : bool, optional\n Defines the behavior if ``xx`` contains value(s) which are outside the\n universe range as defined by ``x``. If `True` (default), all\n extrapolated values will be zero. If `False`, the first or last value\n in ``x`` will be what is returned to the left or right of the range,\n respectively.\n\n Returns\n -------\n xxmf : float or array of floats\n Membership function value at ``xx``, ``u(xx)``. If ``xx`` is a single\n value, this will be a single value; if it is an array or iterable the\n result will be returned as a NumPy array of like shape.\n\n Notes\n -----\n For use in Fuzzy Logic, where an interpolated discrete membership function\n u(x) for discrete values of x on the universe of ``x`` is given. Then,\n consider a new value x = xx, which does not correspond to any discrete\n values of ``x``. 
This function computes the membership value ``u(xx)``\n corresponding to the value ``xx`` using linear interpolation.\n\n \"\"\"\n # Not much beats NumPy's built-in interpolation\n if not zero_outside_x:\n kwargs = (None, None)\n else:\n kwargs = (0.0, 0.0)\n return np.interp(xx, x, xmf, left=kwargs[0], right=kwargs[1])\n\n\ndef interp_universe(x, xmf, y):\n \"\"\"\n Find interpolated universe value(s) for a given fuzzy membership value.\n\n Parameters\n ----------\n x : 1d array\n Independent discrete variable vector.\n xmf : 1d array\n Fuzzy membership function for ``x``. Same length as ``x``.\n y : float\n Specific fuzzy membership value.\n\n Returns\n -------\n xx : list\n List of discrete singleton values on universe ``x`` whose\n membership function value is y, ``u(xx[i])==y``.\n If there are not points xx[i] such that ``u(xx[i])==y``\n it returns an empty list.\n\n Notes\n -----\n For use in Fuzzy Logic, where a membership function level ``y`` is given.\n Consider there is some value (or set of values) ``xx`` for which\n ``u(xx) == y`` is true, though ``xx`` may not correspond to any discrete\n values on ``x``. This function computes the value (or values) of ``xx``\n such that ``u(xx) == y`` using linear interpolation.\n\n \"\"\"\n # Special case required or zero-level cut does not work with faster method\n if y == 0.:\n idx = np.where(np.diff(xmf > y))[0]\n else:\n idx = np.where(np.diff(xmf >= y))[0]\n xx = x[idx] + (y-xmf[idx]) * (x[idx+1]-x[idx]) / (xmf[idx+1]-xmf[idx])\n\n # The above method is fast, but duplicates point values where\n # y == peak of a membership function. Ducking briefly into a set\n # elimniates this. Benchmarked multiple ways; this is by far the fastest.\n # Speed penalty approximately 10%, worth it.\n return [n for n in set(xx.tolist())]\n\n\ndef _interp_universe_fast(x, xmf, y):\n \"\"\"\n Find interpolated universe value(s) for a given fuzzy membership value.\n\n Fast version, with possible duplication.\n\n Parameters\n ----------\n x : 1d array\n Independent discrete variable vector.\n xmf : 1d array\n Fuzzy membership function for ``x``. Same length as ``x``.\n y : float\n Specific fuzzy membership value.\n\n Returns\n -------\n xx : list\n List of discrete singleton values on universe ``x`` whose\n membership function value is y, ``u(xx[i])==y``.\n If there are not points xx[i] such that ``u(xx[i])==y``\n it returns an empty list.\n\n Notes\n -----\n For use in Fuzzy Logic, where a membership function level ``y`` is given.\n Consider there is some value (or set of values) ``xx`` for which\n ``u(xx) == y`` is true, though ``xx`` may not correspond to any discrete\n values on ``x``. 
This function computes the value (or values) of ``xx``\n such that ``u(xx) == y`` using linear interpolation.\n \"\"\"\n # Special case required or zero-level cut does not work with faster method\n if y == 0.:\n idx = np.where(np.diff(xmf > y))[0]\n else:\n idx = np.where(np.diff(xmf >= y))[0]\n\n # This method is fast, but duplicates point values where\n # y == peak of a membership function.\n return x[idx] + (y-xmf[idx]) * (x[idx+1]-x[idx]) / (xmf[idx+1]-xmf[idx])\n\n\ndef modus_ponens(a, b, ap, c=None):\n \"\"\"\n Generalized *modus ponens* deduction to make approximate reasoning in a\n rules-base system.\n\n Parameters\n ----------\n a : 1d array\n Fuzzy set ``a`` on universe ``x``\n b : 1d array\n Fuzzy set ``b`` on universe ``y``\n ap : 1d array\n New fuzzy fact a' (a prime, not transpose)\n c : 1d array, OPTIONAL\n Keyword argument representing fuzzy set ``c`` on universe ``y``.\n Default = None, which will use ``np.ones()`` instead.\n\n Returns\n -------\n R : 2d array\n Full fuzzy relation.\n bp : 1d array\n Fuzzy conclusion b' (b prime)\n\n \"\"\"\n if c is None:\n c = np.ones_like(b)\n r = np.fmax(cartprod(a, b), cartprod(1 - a, c))\n bp = maxmin_composition(ap, r)\n return r, bp.squeeze()\n\n\ndef outer_product(a, b):\n \"\"\"\n Outer product of two fuzzy sets.\n\n Parameters\n ----------\n a : 1d array or iterable\n Fuzzy membership function.\n b : 1d array or iterable\n Fuzzy membership function.\n\n Returns\n -------\n y : float\n Fuzzy outer product value, on range [0, 1]\n\n \"\"\"\n return np.min(np.fmax(np.r_[a], np.r_[b]))\n\n\ndef relation_min(a, b):\n \"\"\"\n Determine fuzzy relation matrix ``R`` using Mamdani implication for the\n fuzzy antecedent ``a`` and consequent ``b`` inputs.\n\n Parameters\n ----------\n a : 1d array\n Fuzzy antecedent variable of length M.\n b : 1d array\n Fuzzy consequent variable of length N.\n\n Returns\n -------\n R : 2d array\n Fuzzy relation between ``a`` and ``b``, of shape (M, N).\n\n \"\"\"\n bb, aa = np.meshgrid(b, a, sparse=True)\n return np.fmin(aa, bb)\n\n\ndef relation_product(a, b):\n \"\"\"\n Determine the fuzzy relation matrix, ``R``, using product implication for\n the fuzzy antecedent ``a`` and the fuzzy consequent ``b``.\n\n Parameters\n ----------\n a : 1d array\n Fuzzy antecedent variable of length M.\n b : 1d array\n Fuzzy consequent variable of length N.\n\n Returns\n -------\n R : 2d array\n Fuzzy relation between ``a`` and ``b``, of shape (M, N).\n\n \"\"\"\n bb, aa = np.meshgrid(b, a, sparse=True)\n return aa * bb\n\n\ndef fuzzy_similarity(ai, b, mode='min'):\n \"\"\"\n The fuzzy similarity between set ``ai`` and observation set ``b``.\n\n Parameters\n ----------\n ai : 1d array\n Fuzzy membership function of set ``ai``.\n b : 1d array\n Fuzzy membership function of set ``b``.\n mode : string\n Controls the method of similarity calculation.\n * ``'min'`` : Computed by array minimum operation.\n * ``'avg'`` : Computed by taking the array average.\n\n Returns\n -------\n s : float\n Fuzzy similarity.\n\n \"\"\"\n if 'min' in mode.lower():\n return min(inner_product(ai, b), 1 - outer_product(ai, b))\n else:\n return (inner_product(ai, b) + (1 - outer_product(ai, b))) / 2.\n\n\ndef partial_dmf(x, mf_name, mf_parameter_dict, partial_parameter):\n \"\"\"\n Calculate the *partial derivative* of a specified membership function.\n\n Parameters\n ----------\n x : float\n input variable.\n mf_name : string\n Membership function name as a string. 
The following are supported:\n * ``'gaussmf'`` : parameters ``'sigma'`` or ``'mean'``\n * ``'gbellmf'`` : parameters ``'a'``, ``'b'``, or ``'c'``\n * ``'sigmf'`` : parameters ``'b'`` or ``'c'``\n mf_parameter_dict : dict\n A dictionary of ``{param : key-value, ...}`` pairs for a particular\n membership function as defined above.\n partial_parameter : string\n Name of the parameter against which we take the partial derivative.\n\n Returns\n -------\n d : float\n Partial derivative of the membership function with respect to the\n chosen parameter, at input point ``x``.\n\n Notes\n -----\n Partial derivatives of fuzzy membership functions are only meaningful for\n continuous functions. Triangular, trapezoidal designs have no partial\n derivatives to calculate. The following\n \"\"\"\n\n if mf_name == 'gaussmf':\n\n sigma = mf_parameter_dict['sigma']\n mean = mf_parameter_dict['mean']\n\n if partial_parameter == 'sigma':\n result = ((2. / sigma**3) *\n np.exp(-(((x - mean)**2) / (sigma)**2)) * (x - mean)**2)\n elif partial_parameter == 'mean':\n result = ((2. / sigma**2) *\n np.exp(-(((x - mean)**2) / (sigma)**2)) * (x - mean))\n\n elif mf_name == 'gbellmf':\n\n a = mf_parameter_dict['a']\n b = mf_parameter_dict['b']\n c = mf_parameter_dict['c']\n\n # Partial result for speed and conciseness in derived eqs below\n d = np.abs((c - x) / a)\n\n if partial_parameter == 'a':\n result = ((2. * b * (c - x)**2.) * d**((2 * b) - 2) /\n (a**3. * (d**(2. * b) + 1)**2.))\n\n elif partial_parameter == 'b':\n result = (-1 * (2 * d**(2. * b) * np.log(d)) /\n ((d**(2. * b) + 1)**2.))\n\n elif partial_parameter == 'c':\n result = ((2. * b * (x - c) * d**((2. * b) - 2)) /\n (a**2. * (d**(2. * b) + 1)**2.))\n\n elif mf_name == 'sigmf':\n\n b = mf_parameter_dict['b']\n c = mf_parameter_dict['c']\n\n if partial_parameter == 'b':\n # Partial result for speed and conciseness\n d = np.exp(c * (b + x))\n result = -1 * (c * d) / (np.exp(b * c) + np.exp(c * x))**2.\n\n elif partial_parameter == 'c':\n # Partial result for speed and conciseness\n d = np.exp(c * (x - b))\n result = ((x - b) * d) / (d + 1)**2.\n\n return result\n\n\ndef sigmoid(x, power, split=0.5):\n \"\"\"\n Intensify grayscale values in an array using a sigmoid function.\n\n Parameters\n ----------\n x : ndarray\n Input vector or image array. Should be pre-normalized to range [0, 1]\n p : float\n Power of the intensification (p > 0). Experiment with small, decimal\n values and increase as necessary.\n split : float\n Threshold for intensification. Values above ``split`` will be\n intensified, while values below `split` will be deintensified. Note\n range for ``split`` is (0, 1). Default of 0.5 is reasonable for many\n well-exposed images.\n\n Returns\n -------\n y : ndarray, same size as x\n Output vector or image with contrast adjusted.\n\n Notes\n -----\n The sigmoid used herein is defined as::\n\n y = 1 / (1 + exp(- exp(- power * (x-split))))\n\n See Also\n --------\n skfuzzy.fuzzymath.contrast\n \"\"\"\n \n return 1. / (1. + np.exp(- power * (x - split)))\n"
] | [
[
"numpy.asarray",
"numpy.zeros_like",
"numpy.exp",
"numpy.where",
"numpy.hstack",
"numpy.ones_like",
"numpy.diff",
"numpy.interp",
"numpy.zeros",
"numpy.log",
"numpy.nonzero",
"numpy.atleast_2d",
"numpy.delete",
"numpy.fmax",
"numpy.argsort",
"numpy.meshgrid",
"numpy.fmin",
"numpy.abs",
"numpy.sort"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
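fuzzy_op in the record above implements Zadeh's extension principle: form all pairwise combinations of the two universes, apply the binary operator, and take the supremum of the pairwise minima for each attainable output value. A small worked example through the fuzzy_add wrapper, assuming it is exposed at the top of the scikit-fuzzy package as skfuzzy.fuzzy_add:

import numpy as np
import skfuzzy as fuzz

x = np.arange(0, 4)                     # universe for fuzzy number "about 1"
a = np.array([0.0, 1.0, 0.5, 0.0])
y = np.arange(0, 4)                     # universe for fuzzy number "about 2"
b = np.array([0.0, 0.5, 1.0, 0.5])

z, mfz = fuzz.fuzzy_add(x, a, y, b)
print(z)    # all attainable sums, 0 through 6
print(mfz)  # sup-min memberships; the peak sits at z == 3 ("about 1" + "about 2")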
mchaker/lab-molpal | [
"f4db7ee2ca51515b4246604867a93a3aac08107d"
] | [
"scripts/fingerprints.py"
] | [
"import argparse\nimport csv\nfrom functools import partial\nimport gzip\nfrom itertools import chain, islice\nimport os\nfrom pathlib import Path\nimport sys\nfrom typing import Iterable, Iterator, List, Optional, Set, Tuple\n\nimport h5py\nimport numpy as np\nimport ray\nfrom rdkit import Chem, DataStructs\nfrom rdkit.Chem import rdMolDescriptors as rdmd\nfrom tqdm import tqdm\n\nsys.path.append('../molpal')\nfrom molpal import featurizer as features\n\ntry:\n if 'redis_password' in os.environ:\n ray.init(\n address=os.environ[\"ip_head\"],\n _node_ip_address=os.environ[\"ip_head\"].split(\":\")[0], \n _redis_password=os.environ['redis_password']\n )\n else:\n ray.init(address='auto')\nexcept ConnectionError:\n ray.init(num_cpus=len(os.sched_getaffinity(0)))\n\ndef get_smis(libaries: Iterable[str], title_line: bool = True,\n delimiter: str = ',', smiles_col: int = 0) -> Iterator[str]:\n for library in libaries:\n if Path(library).suffix == '.gz':\n open_ = partial(gzip.open, mode='rt')\n else:\n open_ = open\n\n with open_(library) as fid:\n reader = csv.reader(fid, delimiter=delimiter)\n if title_line:\n next(reader)\n\n for row in reader:\n yield row[smiles_col]\n\ndef batches(it: Iterable, chunk_size: int) -> Iterator[List]:\n \"\"\"Consume an iterable in batches of size chunk_size\"\"\"\n it = iter(it)\n return iter(lambda: list(islice(it, chunk_size)), [])\n\[email protected]\ndef _smis_to_mols(smis: Iterable) -> List[Optional[Chem.Mol]]:\n return [Chem.MolFromSmiles(smi) for smi in smis]\n\ndef smis_to_mols(smis: Iterable[str]) -> List[Optional[Chem.Mol]]:\n chunksize = int(ray.cluster_resources()['CPU']) * 2\n refs = [\n _smis_to_mols.remote(smis_chunk)\n for smis_chunk in batches(smis, chunksize)\n ]\n mols_chunks = [ray.get(r) for r in refs]\n return list(chain(*mols_chunks))\n\[email protected]\ndef _mols_to_fps(mols: Iterable[Chem.Mol], fingerprint: str = 'pair',\n radius: int = 2, length: int = 2048) -> np.ndarray:\n \"\"\"fingerprint functions must be wrapped in a static function\n so that they may be pickled for parallel processing\n \n Parameters\n ----------\n mols : Iterable[Chem.Mol]\n the molecules to encode\n fingerprint : str\n the the type of fingerprint to generate\n radius : int\n the radius of the fingerprint\n length : int\n the length of the fingerprint\n \n Returns\n -------\n T_comp\n the compressed feature representation of the molecule\n \"\"\"\n if fingerprint == 'morgan':\n fps = [rdmd.GetMorganFingerprintAsBitVect(\n mol, radius=radius, nBits=length, useChirality=True\n ) for mol in mols]\n\n elif fingerprint == 'pair':\n fps = [rdmd.GetHashedAtomPairFingerprintAsBitVect(\n mol, minLength=1, maxLength=1+radius, nBits=length\n ) for mol in mols]\n elif fingerprint == 'rdkit':\n fps = [rdmd.RDKFingerprint(\n mol, minPath=1, maxPath=1+radius, fpSize=length\n ) for mol in mols]\n elif fingerprint == 'maccs':\n fps = [rdmd.GetMACCSKeysFingerprint(mol) for mol in mols]\n elif fingerprint == 'map4':\n fps = [map4.MAP4Calculator(\n dimensions=length, radius=radius, is_folded=True\n ).calculate(mol) for mol in mols]\n else:\n raise NotImplementedError(f'Unrecognized fingerprint: \"{fingerprint}\"')\n\n X = np.empty((len(mols), length))\n [DataStructs.ConvertToNumpyArray(fp, x) for fp, x in zip (fps, X)]\n\n return X\n\ndef mols_to_fps(mols: Iterable[Chem.Mol], fingerprint: str = 'pair',\n radius: int = 2, length: int = 2048) -> np.ndarray:\n \"\"\"Calculate the Morgan fingerprint of each molecule\n\n Parameters\n ----------\n mols : Iterable[Chem.Mol]\n the 
molecules\n radius : int, default=2\n the radius of the fingerprint\n length : int, default=2048\n the number of bits in the fingerprint\n\n Returns\n -------\n List\n a list of the corresponding morgan fingerprints in bit vector form\n \"\"\"\n chunksize = int(ray.cluster_resources()['CPU'] * 16)\n refs = [\n _mols_to_fps.remote(mols_chunk, fingerprint, radius, length)\n for mols_chunk in batches(mols, chunksize)\n ]\n fps_chunks = [ray.get(r) for r in tqdm(\n refs, desc='Calculating fingerprints', unit='chunk', leave=False\n )]\n\n return np.vstack(fps_chunks)\n\ndef fps_hdf5(smis: Iterable[str], size: int,\n fingerprint: str = 'pair', radius: int = 2, length: int = 2048,\n filepath: str = 'fps.h5') -> Tuple[str, Set[int]]:\n \"\"\"Prepare an HDF5 file containing the feature matrix of the input SMILES\n strings\n\n Parameters\n ----------\n smis : Iterable[str]\n the SMILES strings from which to build the feature matrix\n size : int\n the total number of smiles strings\n fingerprint : str, default='pair'\n the type of fingerprint to calculate\n radius : int, default=2\n the \"radius\" of the fingerprint to calculate. For path-based\n fingerprints, this corresponds to the path length\n length : int, default=2048\n the length/number of bits in the fingerprint\n filepath : str, default='fps.h5'\n the filepath of the output HDF5 file\n\n Returns\n -------\n str\n the filepath of the output HDF5 file\n invalid_idxs : Set[int]\n the set of invalid indices in the input SMILES strings\n \"\"\"\n with h5py.File(filepath, 'w') as h5f:\n CHUNKSIZE = 1024\n\n fps_dset = h5f.create_dataset(\n 'fps', (size, length), chunks=(CHUNKSIZE, length), dtype='int8'\n )\n \n batch_size = 4 * CHUNKSIZE * int(ray.cluster_resources()['CPU'])\n n_batches = size//batch_size + 1\n\n invalid_idxs = set()\n i = 0\n\n for smis_batch in tqdm(\n batches(smis, batch_size), total=n_batches,\n desc='Precalculating fps', unit='batch', unit_scale=batch_size\n ):\n mols = smis_to_mols(smis_batch)\n invalid_idxs.update({\n i+j for j, mol in enumerate(mols) if mol is None\n })\n fps = mols_to_fps(\n [mol for mol in mols if mol is not None],\n fingerprint, radius, length\n )\n\n fps_dset[i:i+len(fps)] = fps\n i += len(mols)\n\n valid_size = size - len(invalid_idxs)\n if valid_size != size:\n fps_dset.resize(valid_size, axis=0)\n\n return filepath, invalid_idxs\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--path', type=Path,\n help='the path under which to write the fingerprints file')\n parser.add_argument('--name',\n help='what to name the fingerprints file. If no suffix is provided, will add \".h5\". 
If no name is provided, output file will be name <library>.h5')\n parser.add_argument('--fingerprint', default='pair',\n choices={'morgan', 'rdkit', 'pair', 'maccs', 'map4'},\n help='the type of encoder to use')\n parser.add_argument('--radius', type=int, default=2,\n help='the radius or path length to use for fingerprints')\n parser.add_argument('--length', type=int, default=2048,\n help='the length of the fingerprint')\n parser.add_argument('-l', '--libraries', required=True, nargs='+',\n help='the files containing members of the MoleculePool')\n parser.add_argument('--no-title-line', action='store_true', default=False,\n help='whether there is no title line in the library file')\n parser.add_argument('--total-size', type=int,\n help='the total number of molecules in the library file')\n parser.add_argument('--delimiter', default=',',\n help='the column separator in the library file')\n parser.add_argument('--smiles-col', default=0, type=int,\n help='the column containing the SMILES string in the library file')\n args = parser.parse_args()\n args.title_line = not args.no_title_line\n \n path = args.path or Path(args.libraries[0]).parent\n name = args.name or Path(args.libraries[0]).stem\n filepath = (path / name).with_suffix('.h5')\n\n print('Precalculating feature matrix ...', end=' ')\n\n total_size = sum(1 for _ in get_smis(\n args.libraries, args.title_line, args.delimiter, args.smiles_col\n ))\n smis = get_smis(\n args.libraries, args.title_line, args.delimiter, args.smiles_col\n )\n fps, invalid_lines = fps_hdf5(\n smis, total_size, args.fingerprint,\n args.radius, args.length, filepath\n )\n\n print('Done!')\n print(f'Feature matrix was saved to \"{fps}\"', flush=True)\n print('When using this fingerprints file, you should add '\n f'\"--invalid-lines\" {\", \".join(invalid_lines)} to the command line '\n 'or the configuration file to speed up pool construction')\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
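The fps_hdf5 routine in the entry above writes the fingerprint feature matrix to an HDF5 dataset named "fps". A minimal read-back sketch, assuming the default output path fps.h5 and only h5py/numpy as dependencies (the chunk size below is arbitrary):

    import h5py
    import numpy as np

    # Hypothetical output of fps_hdf5(); adjust the path to the file actually produced.
    with h5py.File("fps.h5", "r") as h5f:
        fps = h5f["fps"]                    # shape: (number of valid SMILES, fingerprint length)
        print("feature matrix shape:", fps.shape)
        chunk = np.asarray(fps[:1024])      # HDF5 datasets support NumPy-style slicing
        print("chunk dtype:", chunk.dtype)  # int8, matching the dtype used at creation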
beegieb/MultiArmedBandits | [
"0ff3b0f45281d8819ae3c2f1086a7cd9626c225f"
] | [
"algorithms.py"
] | [
"from __future__ import division\nfrom scipy import random, exp, log, sqrt, argmax, array, stats\n\nTINY = 1e-6\n\n\nclass BaseBandit(object):\n \"\"\"\n Baseclass for Bandit Algorithms. This is intended to be inherited by other Bandits to provide core functions.\n\n The BaseBandit takes care of basic initialization, and update rules. The class also exposes a number of useful\n properties for tracking metrics useful for monitoring bandit algorithms.\n\n Properties and Attributes exposed by this baseclass:\n n_arms - the number of arms available to the bandit\n draws - the number of draws performed by the bandit for each arm\n payouts - the total payouts given to the algorithm for each arm\n success - the total number of successful payouts for each arm\n expected_payouts - the expected payout for each arm\n expected_success - the expected success rate of each arm\n total_draws - the total number of draws performed by the bandit\n total_payouts - the total payout achieved by the bandit\n total_success - the total number of successful draws achieved by the bandit\n metric - the type of performance metric to use when deciding on which arm to draw\n\n Additionally, the BaseBandit provides a 'hidden' function _metric_fn which exposes the relevent performance\n metric, as a list, to all subclasses\n \"\"\"\n def __init__(self, draws=None, payouts=None, success=None, n_arms=None, metric='payout'):\n \"\"\"\n Must supply either: draws, payouts AND success OR n_arms.\n\n If draws, payouts, AND success, each must have the same length.\n\n :param draws: None or a list containing the number of draws for each arm (default = None)\n :param payouts: None or a list containing the total payouts for each arm (default = None)\n :param success: None or a list containing the success counts for each arm (default = None)\n :param n_arms: None or an int of the number of arms of the bandit (default = None)\n :param metric: Either 'payout', 'success', 'Epayout', 'Esuccess' (default = 'payout')\n Epayout, Esuccess stand for expected_payout and expected_success\n\n This is the performance metric that will be exposed via BaseBandit._metric_fn\n \"\"\"\n if draws is None or payouts is None or success is None:\n if n_arms is None:\n raise ValueError('Must give either draws, payouts, and success or n_arms')\n else:\n self.initialize(n_arms)\n else:\n if len(draws) != len(payouts) and len(draws) != len(success):\n raise ValueError('draws, payouts, and success must all have identical lengths')\n else:\n self.draws = draws\n self.payouts = payouts\n self.success = success\n\n self.metric = metric\n\n def __repr__(self):\n return \"%s(n_arms=%s, metric=%s)\" % (self.__class__.__name__, self.n_arms, self.metric)\n\n def initialize(self, n_arms):\n \"\"\"\n Initialize the bandit algorithm with lists for draws, payouts, and success\n\n :param n_arms: an int of the number of arms of the bandit\n \"\"\"\n self.draws = [0]*n_arms\n self.payouts = [0]*n_arms\n self.success = [0]*n_arms\n\n @property\n def metric(self):\n return self._metric\n\n @metric.setter\n def metric(self, new_metric):\n if new_metric in {'Epayout', 'Esuccess', 'payout', 'success'}:\n self._metric = new_metric\n else:\n raise ValueError('metric must be either \"payout\", \"success\", \"Epayout\", or \"Esuccess\"')\n\n def _metric_fn(self):\n if self.metric == 'payout':\n return self.payouts\n\n elif self.metric == 'success':\n return self.success\n\n elif self.metric == 'Epayout':\n return self.expected_payouts\n\n elif self.metric == 'Esuccess':\n return 
self.expected_success\n\n @property\n def total_draws(self):\n return sum(self.draws)\n\n @property\n def total_success(self):\n return sum(self.success)\n\n @property\n def total_payouts(self):\n return sum(self.payouts)\n\n @property\n def n_arms(self):\n return len(self.draws)\n\n @property\n def expected_success(self):\n return [s/d if d > 0 else 0 for s, d in zip(self.success, self.draws)]\n\n @property\n def expected_payouts(self):\n return [p/d if d > 0 else 0 for p, d in zip(self.payouts, self.draws)]\n\n def update(self, selected_arm, payout):\n \"\"\"\n Update the bandits parameters by incrementing each of:\n draws[selected_arm], payouts[selected_arm], and success[selected_arm]\n\n :param selected_arm: an int on interval [0, n_arms)\n :param payout: the total payout recieved from selected_arm\n \"\"\"\n self.draws[selected_arm] += 1\n self.payouts[selected_arm] += payout\n self.success[selected_arm] += 1 if payout > 0 else 0\n\n def draw(self):\n raise NotImplementedError('This is a baseclass, inherit this class and implement a \"draw\" method')\n\n\ndef linear_schedule(t):\n return 1 / (t + TINY)\n\n\ndef logarithmic_schedule(t):\n return 1 / log(t + 1 + TINY)\n\n\nclass AnnealedBaseBandit(BaseBandit):\n \"\"\"\n A subclass of BaseBandit intended to be inherited by annealing bandit algorithms\n\n Exposes the property:\n schedule - the type of annealing schedule for temperature updates\n\n Exposes the hidden method:\n _schedule_fn which outputs the current temperature at the current iteration\n \"\"\"\n def __init__(self, schedule='logarithmic', **kwargs):\n \"\"\"\n :param schedule: either 'logarithmic' or 'linear' (default = 'logarithmic')\n 'logarithmic' schedule updates temperature(iter_t) = 1 / log(t + 1 + 1e-6)\n 'linear' schedule updates temperature(iter_t) = 1 / (t + 1e-6)\n :param kwargs: Arguments that will be passed to the superclass BaseBandit\n \"\"\"\n self.schedule = schedule\n super(AnnealedBaseBandit, self).__init__(**kwargs)\n\n def __repr__(self):\n return \"%s(schedule=%s, n_arms=%s, metric=%s)\" % (self.__class__.__name__, self.schedule,\n self.n_arms, self.metric)\n\n @property\n def schedule(self):\n return self._schedule_name\n\n @schedule.setter\n def schedule(self, new):\n if new == 'linear':\n self._schedule_name = new\n self._schedule_fn = linear_schedule\n elif new == 'logarithmic':\n self._schedule_name = new\n self._schedule_fn = logarithmic_schedule\n else:\n raise ValueError('Incorrect value for annealing schedule. Got %s. 
Expected \"linear\" or \"logarithmic\"' % new)\n\n\nclass EpsilonGreedyBandit(BaseBandit):\n \"\"\"\n The EpsilonGreedyBandit greedily selects the arm with the highest performing metric with probability (1-epsilon)\n and selects any arm, uniformly at random, with probability epsilon\n \"\"\"\n def __init__(self, epsilon=0.1, **kwargs):\n \"\"\"\n :param epsilon: a float on the interval [0, 1] (default = 0.1)\n explore arms with probability epsilon, and exploit with probability (1 - epsilon)\n :param kwargs: Arguments to pass to the BaseBandit superclass\n \"\"\"\n self.epsilon = epsilon\n super(EpsilonGreedyBandit, self).__init__(**kwargs)\n\n def draw(self):\n \"\"\"\n Draws the best arm with probability (1 - epsilon)\n Draws any arm at random with probility epsilon\n\n :return: The numerical index of the selected arm\n \"\"\"\n if random.rand() < self.epsilon:\n return random.choice(self.n_arms)\n else:\n return argmax(self._metric_fn())\n\n\nclass AnnealedEpsilonGreedyBandit(AnnealedBaseBandit):\n \"\"\"\n An annealed version of the EpsilonGreedyBandit.\n\n Epsilon decreases over time proportional to the temperature given by the annealing schedule\n\n This has the effect of pushing the algorithm towards exploitation as time progresses\n \"\"\"\n def __init__(self, epsilon=1.0, **kwargs):\n \"\"\"\n :param epsilon: float on the interval [0, 1] (default = 1.0)\n :param kwargs: Arguments to pass to AnnealedBaseBandit superclass\n \"\"\"\n self.epsilon = epsilon\n super(AnnealedEpsilonGreedyBandit, self).__init__(**kwargs)\n\n def draw(self):\n \"\"\"\n Draws the best arm with probability (1 - epsilon * temp)\n Draws any arm with probability epsilon * temp\n\n :return: The numerical index of the selected arm\n \"\"\"\n temp = self._schedule_fn(self.total_draws)\n if random.rand() < self.epsilon * temp:\n return random.choice(self.n_arms)\n else:\n return argmax(self._metric_fn())\n\n\ndef softmax(l):\n ex = exp(array(l) - max(l))\n return ex / ex.sum()\n\n\nclass SoftmaxBandit(BaseBandit):\n \"\"\"\n SoftmaxBandit selects arms stochastically by creating a categorical distribution across arms via a softmax function\n \"\"\"\n def draw(self):\n \"\"\"\n Selects arm i with probability distribution given by the softmax:\n P(arm_i) = exp(metric_i) / Z\n\n Where Z is the normalizing constant:\n Z = sum(exp(metric_i) for i in range(n_arms))\n\n :return: The numerical index of the selected arm\n \"\"\"\n return argmax(random.multinomial(1, pvals=softmax(self._metric_fn())))\n\n\nclass AnnealedSoftmaxBandit(AnnealedBaseBandit):\n \"\"\"\n Annealed version of the SoftmaxBandit\n \"\"\"\n def draw(self):\n \"\"\"\n Selects arm i with probability distribution given by the softmax:\n P(arm_i) = exp(metric_i / temperature) / Z\n\n Where Z is the normalizing constant:\n Z = sum(exp(metric_i / temperature) for i in range(n_arms))\n\n :return: The numerical index of the selected arm\n \"\"\"\n temp = self._schedule_fn(self.total_draws)\n return argmax(random.multinomial(1, pvals=softmax(array(self._metric_fn()) / temp)))\n\n\nclass DirichletBandit(BaseBandit):\n \"\"\"\n DirichletBandit selects arms stochastichally from a categorical distribution sampled from a Dirichlet distribution\n\n This bandit samples priors for the categorical distribution, and then randomly selects the arm from the given\n categorical distribution\n \"\"\"\n def __init__(self, random_sample=True, sample_priors=True, **kwargs):\n \"\"\"\n :param random_sample: a boolean (default True)\n if True, the selected arm is drawn at random 
from a categorical distribution\n if False, the argmax from categorical parameters is returned as the selected arm\n :param sample_priors: a boolean (default True)\n if True, parameter for the categorical are sampled at random from a Dirichlet distribution\n if False, parameters for the categorical are given by the mean of a Dirichlet distribution\n :param kwargs: Arguments to pass to BaseBandit superclass\n \"\"\"\n self.random_sample = random_sample\n self.sample_priors = sample_priors\n super(DirichletBandit, self).__init__(**kwargs)\n\n def draw(self):\n \"\"\"\n if sample_priors = True and random_sample = True:\n draw returns a random draw of a categorical distribution with parameters drawn from a Dirichlet distribution\n the hyperparameters on the Dirichlet are given by the bandit's metric with laplacian smoothing\n if sample_priors = False and random_sample = True:\n draw returns a random draw of a categorical distribution with parameters given by the bandit's metric\n if sample_priors = True and random_sample = False:\n draw returns argmax(random.dirichlet((x_0 + 1, ... , x_n_arms + 1))) where x_i is the ith value returned by\n the bandit's metric.\n if sample_priors = False and random_sample = False:\n become a purely greedy bandit with the selected arm given by argmax(metric)\n\n :return: The numerical index of the selected arm\n \"\"\"\n x = array(self._metric_fn()) + 1\n\n if self.sample_priors:\n pvals = random.dirichlet(x)\n else:\n pvals = x / sum(x)\n\n if self.random_sample:\n return argmax(random.multinomial(1, pvals=pvals))\n else:\n return argmax(pvals)\n\n\nclass AnnealedDirichletBandit(AnnealedBaseBandit):\n \"\"\"\n Nearly identical to the DirichletBandit, the only difference is annealing is applied when samping parameters from\n the Dirichlet Distribution. Annealing has the effect of reducing the variance in samples pulled from the Dirichlet\n distribution as the temperature decreases.\n \"\"\"\n def __init__(self, random_sample=True, sample_priors=True, **kwargs):\n \"\"\"\n :param random_sample: a boolean (default True)\n if True, the selected arm is drawn at random from a categorical distribution\n if False, the argmax from categorical parameters is returned as the selected arm\n :param sample_priors: a boolean (default True)\n if True, parameter for the categorical are sampled at random from a Dirichlet distribution\n if False, parameters for the categorical are given by the mean of a Dirichlet distribution\n :param kwargs: Arguments to pass to AnnealedBaseBandit superclass\n \"\"\"\n self.random_sample = random_sample\n self.sample_priors = sample_priors\n super(AnnealedDirichletBandit, self).__init__(**kwargs)\n\n def draw(self):\n \"\"\"\n if sample_priors = True and random_sample = True:\n draw returns a random draw of a categorical distribution with parameters drawn from a Dirichlet distribution\n the hyperparameters on the Dirichlet are given by the bandit's metric with laplacian smoothing\n if sample_priors = False and random_sample = True:\n draw returns a random draw of a categorical distribution with parameters given by the bandit's metric\n if sample_priors = True and random_sample = False:\n draw returns argmax(random.dirichlet((x_0 + 1, ... 
, x_n_arms + 1))) where x_i is the ith value returned by\n the bandit's metric.\n if sample_priors = False and random_sample = False:\n become a purely greedy bandit with the selected arm given by argmax(metric)\n\n :return: The numerical index of the selected arm\n \"\"\"\n temp = self._schedule_fn(self.total_draws)\n x = array(self._metric_fn()) * temp + 1\n\n if self.sample_priors:\n pvals = random.dirichlet(x)\n else:\n pvals = x / sum(x)\n\n if self.random_sample:\n return argmax(random.multinomial(1, pvals=pvals))\n else:\n return argmax(pvals)\n\n\nclass UCBBetaBandit(BaseBandit):\n \"\"\"\n An Upper Confidence Bound bandit that assumes each arm's chance of success is given by a Bernoulli distribution,\n and the payout of each arm is identical\n\n The bandit assumes the Bernoulli parameters are generated from a Beta prior whose uncertainty can be quantified\n\n Arms are selected deterministically by selecting the arm with the highest estimated upper confidence bound on\n the beta priors\n \"\"\"\n def __init__(self, conf=0.95, **kwargs):\n \"\"\"\n :param conf: The 2-sided confidence interval to use when calculating the Upper Confidence Bound (default 0.95)\n :param kwargs: Arguments to pass to BaseBandit superclass\n\n Note: metric is ignored in this bandit algorithm. The beta distribution parameters are given by success and\n failure rates of each individual arm\n \"\"\"\n self.conf = conf\n super(UCBBetaBandit, self).__init__(**kwargs)\n\n def draw(self):\n \"\"\"\n Selects the arm to draw based on the upper bounds of each arm's confidence interval\n\n Specifically returns: argmax([... beta(succ_i + 1, fail_i + 1).interval(conf) ... ])\n where succ_i and fail_i are the total number of successful and failed pulls for the ith arm\n\n :return: The numerical index of the selected arm\n \"\"\"\n succ = array(self.success)\n fail = array(self.draws) - succ\n beta = stats.beta(succ + 1, fail + 1)\n\n return argmax(beta.interval(self.conf)[1])\n\n\nclass RandomBetaBandit(BaseBandit):\n \"\"\"\n The RandomBetaBandit has similar assumptions to the UCBBetaBandit. But instead of estimating the probability of\n success for each arm by looking at the upper confidence bound, this bandit instead samples the probability of\n success for each arm from a beta distribution\n\n This has the effect of introducing randomness into the process of selecting arms, while accounting for uncertainty\n in the success rates of individual arms. There is also the added bonus that sampling is computationally faster\n than computing upper confidence bounds on a Beta distribution\n \"\"\"\n def draw(self):\n \"\"\"\n Selects the arm with the largest sampled probability of success\n\n Specifically returns: argmax([... random.beta(succ_i + 1, fail_i + 1) ... ])\n where succ_i and fail_i are the total number of successful and failed pulls for the ith arm\n\n :return: The numerical index of the selected arm\n \"\"\"\n succ = array(self.success)\n fail = array(self.draws) - succ\n rvs = random.beta(succ + 1, fail + 1)\n\n return argmax(rvs)\n\n\nclass UCB1Bandit(BaseBandit):\n \"\"\"\n Implements the UCB1 algorithm, one of the simplest in the UCB family of bandits.\n\n The implementation details can be found in the following publication:\n http://homes.di.unimi.it/~cesabian/Pubblicazioni/ml-02.pdf\n \"\"\"\n def draw(self):\n \"\"\"\n Draws arm based on the highest expected reward with a bonus given for uncertainty.\n\n Concretely:\n draws argmax([... 
expected_payout[i] + sqrt(2*log(T[i]) / draws[i]) ...])\n\n :return: The numerical index of the selected arm\n \"\"\"\n t = 2*log(self.total_draws)\n\n return argmax([float('inf') if d == 0 else e + sqrt(t/d) for e, d in zip(self.expected_payouts, self.draws)])\n\n\nclass UCBGaussianBandit(BaseBandit):\n \"\"\"\n UCBGaussianBandit is another UCB bandit that models expected payout for each arm as a univariate-gaussian\n distribution. The bandit selects the arm with the highest 95% confidence bound for expected reward, which is\n computed in closed form using the approximation:\n upper_bound[i] = mean[i] + 1.96 * std[i]\n\n This model uses an online algorithm for computing variance described on Wikipedia:\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm\n \"\"\"\n\n def initialize(self, n_arms):\n \"\"\"\n Initialize the bandit algorithm with lists for draws, payouts, success, and online variance\n\n :param n_arms: an int of the number of arms of the bandit\n \"\"\"\n self.M2 = [0 for _ in range(n_arms)]\n super(UCBGaussianBandit, self).initialize(n_arms)\n\n def update(self, selected_arm, payout):\n \"\"\"\n Update the bandits parameters by incrementing each of:\n draws[selected_arm], payouts[selected_arm], and success[selected_arm]\n\n Also updates tracking for online variance estimates\n\n :param selected_arm: an int on interval [0, n_arms)\n :param payout: the total payout recieved from selected_arm\n \"\"\"\n delta = payout - self.expected_payouts[selected_arm]\n super(UCBGaussianBandit, self).update(selected_arm, payout)\n mean = self.expected_payouts[selected_arm]\n self.M2[selected_arm] += delta * (payout - mean)\n\n def draw(self):\n \"\"\"\n If an arm has been drawn less than 2 times, select that arm\n\n Otherwise return:\n argmax([ ... expected_reward[i] + 1.96 * std[i] ...])\n\n :return: The numerical index of the selected arm\n \"\"\"\n mu = self.expected_payouts\n M2 = self.M2\n counts = self.draws\n\n return argmax(float('inf') if n < 2 else m + 1.96 * sqrt(s / (n - 1)) for m, s, n in zip(mu, M2, counts))\n\n\nclass RandomGaussianBandit(UCBGaussianBandit):\n \"\"\"\n Similar model to the UCBGaussianBandit, the difference being the model randomly samples the estimates for\n expected reward from the learned gaussians. This adds randomness the draws allowing the algorithm to better handle\n settings with delayed feedback.\n\n Some imperical tests also provide evidence that this algorithm outperforms the UCBGaussianBandit in settings with\n instantanious feedback, but this is not a proven fact. Use that observation with caution.\n \"\"\"\n def draw(self):\n \"\"\"\n If an arm has been drawn less than 2 times, select that arm\n\n Otherwise return:\n argmax([ ... random.normal(mean=expected_return[i], sd=std[i]) ...])\n\n :return: The numerical index of the selected arm\n \"\"\"\n mu = array(self.expected_payouts)\n sd = array([float('inf') if n < 2 else sqrt(s / (n - 1)) for s, n in zip(self.M2, self.draws)])\n\n return argmax(random.randn(self.n_arms) * sd + mu)"
] | [
[
"scipy.random.multinomial",
"scipy.log",
"scipy.argmax",
"scipy.stats.beta",
"scipy.sqrt",
"scipy.random.beta",
"scipy.random.randn",
"scipy.random.choice",
"scipy.array",
"scipy.random.dirichlet",
"scipy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
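A minimal simulation sketch for the EpsilonGreedyBandit defined in the entry above, assuming algorithms.py is importable; the per-arm Bernoulli success rates are invented for illustration:

    import random

    from algorithms import EpsilonGreedyBandit  # the module shown in the entry above

    true_rates = [0.2, 0.5, 0.7]  # hidden success probability of each arm (made up)
    bandit = EpsilonGreedyBandit(epsilon=0.1, n_arms=len(true_rates), metric="success")

    for _ in range(10000):
        arm = bandit.draw()                              # explore with prob. epsilon, else exploit
        payout = 1.0 if random.random() < true_rates[arm] else 0.0
        bandit.update(arm, payout)                       # increments draws, payouts and success

    print("draws per arm:", bandit.draws)
    print("expected success rates:", bandit.expected_success)

The same loop works for the other bandit classes in the module, since they all share the BaseBandit draw/update interface.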
fsx950223/TensorRT | [
"ab20a8ac26da03df5e434b4f9d1da0156ca019b6"
] | [
"samples/python/uff_custom_plugin/model.py"
] | [
"#\n# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport tensorflow as tf\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nimport graphsurgeon as gs\nimport uff\n\n# lenet5.py\nfrom lenet5 import ModelData\n\nWORKING_DIR = os.environ.get(\"TRT_WORKING_DIR\") or os.path.dirname(os.path.realpath(__file__))\n\n# Path to which trained model will be saved (check README.md)\nMODEL_PATH = os.path.join(\n WORKING_DIR,\n 'models/trained_lenet5.pb'\n)\n\n# Generates mappings from unsupported TensorFlow operations to TensorRT plugins\ndef prepare_namespace_plugin_map():\n # In this sample, the only operation that is not supported by TensorRT\n # is tf.nn.relu6, so we create a new node which will tell UffParser which\n # plugin to run and with which arguments in place of tf.nn.relu6.\n\n\n # The \"clipMin\" and \"clipMax\" fields of this TensorFlow node will be parsed by createPlugin,\n # and used to create a CustomClipPlugin with the appropriate parameters.\n trt_relu6 = gs.create_plugin_node(name=\"trt_relu6\", op=\"CustomClipPlugin\", clipMin=0.0, clipMax=6.0)\n namespace_plugin_map = {\n ModelData.RELU6_NAME: trt_relu6\n }\n return namespace_plugin_map\n\n# Transforms model path to uff path (e.g. /a/b/c/d.pb -> /a/b/c/d.uff)\ndef model_path_to_uff_path(model_path):\n uff_path = os.path.splitext(model_path)[0] + \".uff\"\n return uff_path\n\n# Converts the TensorFlow frozen graphdef to UFF format using the UFF converter\ndef model_to_uff(model_path):\n # Transform graph using graphsurgeon to map unsupported TensorFlow\n # operations to appropriate TensorRT custom layer plugins\n dynamic_graph = gs.DynamicGraph(model_path)\n dynamic_graph.collapse_namespaces(prepare_namespace_plugin_map())\n # Save resulting graph to UFF file\n output_uff_path = model_path_to_uff_path(model_path)\n uff.from_tensorflow(\n dynamic_graph.as_graph_def(),\n [ModelData.OUTPUT_NAME],\n output_filename=output_uff_path,\n text=True\n )\n return output_uff_path\n\ndef main():\n # Load pretrained model\n if not os.path.isfile(MODEL_PATH):\n raise IOError(\"\\n{}\\n{}\\n{}\\n\".format(\n \"Failed to load model file ({}).\".format(MODEL_PATH),\n \"Please use 'python lenet5.py' to train and save the model.\",\n \"For more information, see the included README.md\"\n ))\n\n uff_path = model_to_uff(MODEL_PATH)\n print(\"Saved converted UFF model to: \" + uff_path)\n\nif __name__ == \"__main__\":\n main()\n\n"
] | [
[
"tensorflow.logging.set_verbosity"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
jiansfoggy/16-720B | [
"6395555449fa297f19efb42970e480f1b382e38a"
] | [
"HW3/code/InverseCompositionAffine.py"
] | [
"import numpy as np\nfrom scipy.interpolate import RectBivariateSpline\n\ndef validate_coords(y, x, ny, nx):\n\ta = np.logical_and(np.logical_and(x>=0, x<=nx-1), np.logical_and(y>=0, y<=ny-1))\n\treturn a.nonzero()[0]\n\ndef InverseCompositionAffine(It, It1):\n\t# Input: \n\t#\tIt: template image\n\t#\tIt1: Current image\n\n\t# Output:\n\t#\tM: the Affine warp matrix [2x3 numpy array]\n\n # put your implementation here\n\tM = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])\n\n\th,w = It.shape\n\tx = np.arange(w)\n\ty = np.arange(h)\n\t\n\tIt_y, It_x = np.gradient(It)\n\n\tIt_spline = RectBivariateSpline(y, x, It)\n\tIt1_spline = RectBivariateSpline(y, x, It1)\n\tIt_x_spline = RectBivariateSpline(y, x, It_x)\n\tIt_y_spline = RectBivariateSpline(y, x, It_y)\n\n\t\n\txt,yt = np.meshgrid(x, y)\n\txt = np.reshape(xt, (-1,1))\n\tyt = np.reshape(yt, (-1,1))\n\t\n\ttemplate = np.array(It_spline.ev(yt,xt).tolist())\n\ttemplate = template.ravel()\n\t# print (\"Template shape is {0}\".format(template.shape))\n\n\tpatch_x = np.reshape(np.array(It_x_spline.ev(yt,xt).tolist()), (-1,1))\n\tpatch_y = np.reshape(np.array(It_y_spline.ev(yt,xt).tolist()), (-1,1))\n\tA = np.hstack((np.multiply(yt,patch_y), np.multiply(xt,patch_y), patch_y, np.multiply(yt,patch_x), np.multiply(xt,patch_x), patch_x))\n\ta = np.ones((xt.shape[0],1))\n\txy1 = np.hstack((yt,xt,a))\n\n\ttol = 0.1\n\titer1 = 0\n\twhile (True):\n\t\taffine_homogenous = np.matmul(M,xy1.T)\n\t\tvalid_coords = validate_coords(affine_homogenous[0], affine_homogenous[1], h, w)\n\t\tC = A[valid_coords,:]\n\t\tH = np.matmul(C.T,C)\n\n\t\tyi = affine_homogenous[0, valid_coords]\n\t\txi = affine_homogenous[1, valid_coords]\n\n\t\timage = np.array(It1_spline.ev(yi, xi).tolist())\n\t\ttemp_template = template[valid_coords]\n\t\tb = image - temp_template\n\t\tb = np.matmul(C.T, b)\n\n\t\tdeltap = np.linalg.lstsq(H,b,rcond=None)[0]\n\t\tdeltaM = np.reshape(deltap, (2,3))\n\n\t\tM = M - deltaM\n\t\ta = np.linalg.norm(deltaM)\n\t\tif a < tol:\n\t\t\t# print (iter1)\n\t\t\tbreak\n\n\t\titer1 += 1\n\treturn M"
] | [
[
"numpy.hstack",
"scipy.interpolate.RectBivariateSpline",
"numpy.meshgrid",
"numpy.gradient",
"numpy.reshape",
"numpy.arange",
"numpy.logical_and",
"numpy.multiply",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.ones",
"numpy.linalg.lstsq",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
marlohmann/allenact | [
"f29dd6f0ec62425b02ca07fee815b1a82627a28e"
] | [
"projects/tutorials/babyai_go_to_local_bc_offpolicy.py"
] | [
"import os\nfrom typing import Optional, List, Tuple\n\nimport torch\nfrom gym_minigrid.minigrid import MiniGridEnv\n\nfrom plugins.babyai_plugin.babyai_constants import BABYAI_EXPERT_TRAJECTORIES_DIR\nfrom plugins.minigrid_plugin.minigrid_offpolicy import (\n MiniGridOffPolicyExpertCELoss,\n create_minigrid_offpolicy_data_iterator,\n)\nfrom projects.babyai_baselines.experiments.go_to_local.base import (\n BaseBabyAIGoToLocalExperimentConfig,\n)\nfrom utils.experiment_utils import PipelineStage, OffPolicyPipelineComponent\n\n\nclass BCOffPolicyBabyAIGoToLocalExperimentConfig(BaseBabyAIGoToLocalExperimentConfig):\n \"\"\"BC Off policy imitation.\"\"\"\n\n DATASET: Optional[List[Tuple[str, bytes, List[int], MiniGridEnv.Actions]]] = None\n\n GPU_ID = 0 if torch.cuda.is_available() else None\n\n @classmethod\n def tag(cls):\n return \"BabyAIGoToLocalBCOffPolicy\"\n\n @classmethod\n def METRIC_ACCUMULATE_INTERVAL(cls):\n return 1\n\n @classmethod\n def training_pipeline(cls, **kwargs):\n total_train_steps = cls.TOTAL_IL_TRAIN_STEPS\n ppo_info = cls.rl_loss_default(\"ppo\", steps=-1)\n\n num_mini_batch = ppo_info[\"num_mini_batch\"]\n update_repeats = ppo_info[\"update_repeats\"]\n\n return cls._training_pipeline(\n named_losses={\n \"offpolicy_expert_ce_loss\": MiniGridOffPolicyExpertCELoss(\n total_episodes_in_epoch=int(1e6)\n ),\n },\n pipeline_stages=[\n PipelineStage(\n loss_names=[],\n max_stage_steps=total_train_steps,\n offpolicy_component=OffPolicyPipelineComponent(\n data_iterator_builder=lambda **kwargs: create_minigrid_offpolicy_data_iterator(\n path=os.path.join(\n BABYAI_EXPERT_TRAJECTORIES_DIR,\n \"BabyAI-GoToLocal-v0{}.pkl\".format(\n \"\" if torch.cuda.is_available() else \"-small\"\n ),\n ),\n nrollouts=cls.NUM_TRAIN_SAMPLERS // num_mini_batch,\n rollout_len=cls.ROLLOUT_STEPS,\n instr_len=cls.INSTR_LEN,\n **kwargs,\n ),\n loss_names=[\"offpolicy_expert_ce_loss\"],\n updates=num_mini_batch * update_repeats,\n ),\n ),\n ],\n num_mini_batch=0,\n update_repeats=0,\n total_train_steps=total_train_steps,\n )\n"
] | [
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
WadhwaniAI/utilities | [
"3ca351302d8381926ef562e52f6454a2ea230cac"
] | [
"utilities/viz.py"
] | [
"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('Agg')\nfrom sklearn.metrics import classification_report\n\ndef gt_vs_pred(targets, predictions):\n sorted_ind = np.argsort(targets)\n targets = targets[sorted_ind]\n predictions = predictions[sorted_ind]\n fig = plt.figure()\n plt.plot(targets, targets, c='g')\n plt.scatter(targets, predictions, c='k', s=5)\n plt.xlabel('True Weight')\n plt.ylabel('Predicted Weight')\n plt.legend()\n\n return fig\n\n\ndef transform_data(data, function):\n return pd.Series(list(map(function, data))).to_numpy()\n\n\ndef compute_classification_report(targets, predictions, target_names):\n class_report = classification_report(targets, predictions, target_names=target_names, output_dict=True)\n\n del class_report['accuracy']\n del class_report['weighted avg']\n\n df_classification_report = pd.DataFrame.from_dict(class_report, orient='index')\n df_classification_report['support-pct'] = df_classification_report['support'] / df_classification_report.loc['macro avg', 'support']\n\n keys = ['precision', 'recall', 'f1-score', 'support-pct']\n functions = [lambda x: round(x, 2), lambda x: round(x, 2), lambda x: round(x, 2), lambda x: round(100 * x, 1)]\n\n for key, function in zip(keys, functions):\n df_classification_report[key] = transform_data(df_classification_report[key], function)\n\n data = np.hstack((np.array(df_classification_report.index).reshape((-1, 1)),\n df_classification_report.to_numpy())).tolist()\n\n\n columns = [''] + df_classification_report.columns[:].tolist()\n return data, columns\n\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"matplotlib.use",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"pandas.DataFrame.from_dict",
"numpy.argsort",
"numpy.array",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
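A small usage sketch for compute_classification_report from the entry above; the labels and class names are invented, and the import path utilities/viz.py is assumed to be importable:

    import numpy as np

    from utilities.viz import compute_classification_report  # module shown in the entry above

    y_true = np.array([0, 0, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 1, 2, 0])

    data, columns = compute_classification_report(y_true, y_pred, target_names=["cat", "dog", "bird"])
    print(columns)   # ['', 'precision', 'recall', 'f1-score', 'support', 'support-pct']
    for row in data:
        print(row)   # one row per class plus the 'macro avg' row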
Chappie733/Polynomial-Neural-Network-comparison | [
"550a91ee3c6ea4cdd16bfee0fddc277c038b9166"
] | [
"PolLayer.py"
] | [
"import numpy as np\r\nimport random\r\n\r\nMAX_INITIAL_WEIGHT = -1\r\nMIN_INITIAL_WEIGHT = 1\r\n\r\nMAX_INITIAL_BIAS = -1\r\nMIN_INITIAL_BIAS = 1\r\n\r\nMAX_INITIAL_EXP = 1\r\nMIN_INITIAL_EXP = 1\r\n\r\ndef norm(x):\r\n\treturn 1 if x>= 0 else -1\r\n\r\ndef np_pow(a, b):\r\n\treturn np.sign(a) * (np.abs(a) ** b)\r\n\r\n# 1 -> bias\r\n# 2 -> weight\r\n# 3 -> exps\r\ndef r(t):\r\n\tif t == 1:\r\n\t\treturn random.uniform(MIN_INITIAL_BIAS, MAX_INITIAL_BIAS)\r\n\telif t == 2:\r\n\t\treturn random.uniform(MIN_INITIAL_WEIGHT, MAX_INITIAL_WEIGHT)\r\n\telse:\r\n\t\treturn random.uniform(MIN_INITIAL_EXP, MAX_INITIAL_EXP)\r\n\r\ndef sigmoid(x, deriv=False):\r\n\treturn 1/(1+np.exp(-x)) if not deriv else sigmoid(x)*(1-sigmoid(x))\r\n\r\ndef none(x, deriv=False):\r\n\treturn x if not deriv else 1\r\n\r\nclass PolLayer:\r\n\r\n\tdef __init__(self, n_neurons, activation=none, dtype=np.float64):\r\n\t\tself.n_neurons = n_neurons\r\n\t\tself.func = activation\r\n\t\tself.dtype = dtype\r\n\t\tself.neurons = np.array([0 for i in range(n_neurons)], dtype=dtype)\r\n\t\tself.biases = np.array([r(1) for _ in range(self.n_neurons)], dtype=dtype)\r\n\r\n\tdef log(self, next_layer):\r\n\t\tself.weights = np.array([[r(2) for _ in range(self.n_neurons)] for n in range(next_layer.n_neurons)], dtype=self.dtype)\r\n\t\tself.exps = np.array([[r(3) for _ in range(self.n_neurons)] for n in range(next_layer.n_neurons)], dtype=self.dtype)\r\n\r\n\tdef feed(self, prev):\r\n\t\tif isinstance(prev, PolLayer):\r\n\t\t\tfor i in range(self.n_neurons):\r\n\t\t\t\tself.neurons[i] = prev.activation(i, bias=self.biases[i])\r\n\t\telse:\r\n\t\t\tself.neurons = prev\r\n\r\n\tdef activation(self, n_index, bias=0):\r\n\t\treturn self.func(np.dot(np_pow(self.neurons, self.exps[n_index]), self.weights[n_index])+bias)\r\n\r\n\tdef get_z(self, n_index, bias=0):\r\n\t\treturn np.dot(np_pow(self.neurons, self.exps[n_index]), self.weights[n_index])+bias\r\n"
] | [
[
"numpy.sign",
"numpy.exp",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
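A toy forward pass for the PolLayer class in the entry above, assuming PolLayer.py is importable; layer sizes and input values are arbitrary:

    import numpy as np

    from PolLayer import PolLayer, sigmoid  # module shown in the entry above

    inputs = PolLayer(3, activation=sigmoid)  # the activation is applied while feeding the next layer
    outputs = PolLayer(2)
    inputs.log(outputs)                       # allocate weights and exponents towards `outputs`

    inputs.feed(np.array([0.5, -1.0, 2.0]))   # raw values are assigned directly to the first layer
    outputs.feed(inputs)                      # polynomial weighted sum + bias, then sigmoid
    print(outputs.neurons)                    # two activations in (0, 1)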
aktgpt/brevis | [
"0c3dcabd241ea50cafbc2012250804e1ecb7555e"
] | [
"brevis/test/lupi_tester.py"
] | [
"import os\nimport random\n\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom skimage.metrics import structural_similarity as ssim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import WeightedRandomSampler\nfrom tqdm import tqdm\nfrom sklearn.metrics import mean_absolute_error\n\n\ndef calculate_ssim(im1, im2, data_range=255, multichannel=True):\n if multichannel:\n full_ssim = ssim(im1, im2, val_range=data_range, multichannel=True, full=True)[1]\n out_ssim = full_ssim.mean()\n else:\n full_ssim = ssim(im1, im2, val_range=data_range, multichannel=False, full=True)[1]\n out_ssim = full_ssim.mean()\n\n return out_ssim\n\n\nclass LUPITester:\n def __init__(self, config, save_folder, save_softmax=False):\n self.config = config\n self.save_folder = save_folder\n self.model_checkpoint = torch.load(\n os.path.join(save_folder, config[\"model_path\"])\n )\n self.image_folder = os.path.join(self.save_folder, \"test_images\")\n if not os.path.exists(self.image_folder):\n os.makedirs(self.image_folder)\n self.save_softmax = save_softmax\n if self.save_softmax:\n self.softmax_save_folder = os.path.join(self.image_folder, \"softmax\")\n if not os.path.exists(self.softmax_save_folder):\n os.makedirs(self.softmax_save_folder)\n\n def test(self, dataloader, models):\n # self.models = []\n # for model in models:\n model = models[0]\n model = model.cuda()\n self.model = torch.nn.DataParallel(model)\n\n # self.model = self.models[0]\n # self.model.module.load_state_dict(self.model_checkpoint)\n self.model.load_state_dict(self.model_checkpoint[\"model_state_dict\"])\n self.model.eval()\n\n best_train_epoch = self.model_checkpoint[\"epoch\"]\n best_train_loss = self.model_checkpoint[\"epoch_loss\"]\n print(f\"Epoch:{best_train_epoch},Loss:{best_train_loss}\")\n\n test_iou, test_ssim, test_loss = self._test_epoch(dataloader)\n\n df = pd.DataFrame({\"IOU\": test_iou, \"SSIM\": test_ssim, \"MAE\": test_loss})\n df.to_csv(os.path.join(self.save_folder, \"losses_test.csv\"), index=False)\n\n def _test_epoch(self, dataloader):\n MAE_criterion = torch.nn.L1Loss()\n\n ssims = []\n MAEs = []\n ious = []\n target_ims = []\n with torch.no_grad():\n for batch_idx, sample in enumerate(tqdm(dataloader)):\n image_name = sample[2]\n preprocess_step = sample[3]\n preprocess_stats = sample[4]\n magnification = sample[5]\n\n input = sample[0].cuda().to(non_blocking=True)\n mask = sample[1][:, 0].unsqueeze(1).cuda().to(non_blocking=True)\n mask_onehot = F.one_hot(mask.long()).squeeze(1).permute(0, 3, 1, 2)\n C_mask_out = mask_onehot.shape[1]\n target = sample[1][:, 1].unsqueeze(1).cuda().to(non_blocking=True)\n C_out = target.shape[1]\n\n output_mask, output, mask_op_softmax = self.infer_full_image(\n input, C_out, C_mask_out, kernel_size=512, stride=256\n )\n\n if self.save_softmax:\n np.save(\n os.path.join(\n self.softmax_save_folder, f\"softmax_{image_name[0]}\",\n ),\n mask_op_softmax.astype(np.float32),\n )\n np.save(\n os.path.join(self.softmax_save_folder, f\"mask_{image_name[0]}\",),\n mask.cpu()\n .squeeze(0)\n .numpy()\n .transpose(1, 2, 0)\n .astype(np.float32),\n )\n\n intersection = torch.logical_and(mask, output_mask)\n union = torch.logical_or(mask, output_mask)\n iou = torch.true_divide(torch.sum(intersection), torch.sum(union))\n ious.append(iou.item())\n\n output_8bit = (\n (output[0] * 255)\n .detach()\n .cpu()\n .numpy()\n .transpose(1, 2, 0)\n .astype(\"uint8\")\n )\n target_8bit = (\n (target[0] * 
255)\n .detach()\n .cpu()\n .numpy()\n .transpose(1, 2, 0)\n .astype(\"uint8\")\n )\n ssims.append(\n calculate_ssim(\n target_8bit, output_8bit, data_range=255, multichannel=True,\n )\n )\n\n output, target = self.write_output_images(\n output[0],\n target[0],\n output_mask[0],\n mask[0],\n image_name,\n preprocess_step[0],\n preprocess_stats,\n magnification[0],\n )\n\n MAEs.append(\n mean_absolute_error(\n output[:, :, 0].astype(\"float32\"),\n target[:, :, 0].astype(\"float32\"),\n ).item()\n )\n \n target_ims.append(target[:, :, 0].astype('float32'))\n\n gt_median = np.median(target_ims)\n print(\"Test MAE loss: \", np.mean(MAEs)/ gt_median, \" \", \"\\u00B1\", \" \", np.std(MAEs)/ gt_median)\n\n print(\n \"Test SSIMs: \", np.mean(ssims), \" \", \"\\u00B1\", \" \", np.std(ssims),\n )\n print(\n \"Test IOUs: \", np.mean(ious), \" \", \"\\u00B1\", \" \", np.std(ious),\n )\n\n return ious, ssims, MAEs\n\n def infer_full_image(self, input, C_out, C_mask_out, kernel_size=256, stride=128):\n self.model.eval()\n B, C, W, H = input.shape\n pad_W = kernel_size - W % kernel_size\n pad_H = kernel_size - H % kernel_size\n\n x, _, _ = compute_pyramid_patch_weight_loss(kernel_size, kernel_size)\n\n input = F.pad(input, (0, pad_H, 0, pad_W), mode=\"reflect\").squeeze(0)\n _, W_pad, H_pad = input.shape\n patches = input.unfold(1, kernel_size, stride).unfold(2, kernel_size, stride)\n\n c, n_w, n_h, w, h = patches.shape\n patches = patches.contiguous().view(c, -1, kernel_size, kernel_size)\n\n dataset = torch.utils.data.TensorDataset(patches.permute(1, 0, 2, 3))\n batch_size = 16\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)\n op = []\n mask_op = []\n for batch_idx, sample1 in enumerate(dataloader):\n patch_mask_op, patch_op = self.model(sample1[0])\n op.append(patch_op)\n mask_op.append(patch_mask_op)\n op = torch.cat(op).permute(1, 0, 2, 3)\n mask_op = torch.cat(mask_op).permute(1, 0, 2, 3)\n\n op = op.permute(0, 2, 3, 1).reshape(1, -1, n_w * n_h)\n mask_op = mask_op.permute(0, 2, 3, 1).reshape(1, -1, n_w * n_h)\n # weights = torch.ones_like(op)\n weights_op = (\n torch.from_numpy(x)\n .unsqueeze(0)\n .unsqueeze(-1)\n .repeat(1, C_out, 1, n_w * n_h)\n .reshape(1, -1, n_w * n_h)\n ).cuda()\n op = torch.mul(weights_op, op)\n op = F.fold(\n op,\n output_size=(W_pad, H_pad),\n kernel_size=(kernel_size, kernel_size),\n stride=(stride, stride),\n )\n weights_op = F.fold(\n weights_op,\n output_size=(W_pad, H_pad),\n kernel_size=(kernel_size, kernel_size),\n stride=(stride, stride),\n )\n op = torch.div(op, weights_op)\n\n weights_mask_op = (\n torch.from_numpy(x)\n .unsqueeze(0)\n .unsqueeze(-1)\n .repeat(1, C_mask_out, 1, n_w * n_h)\n .reshape(1, -1, n_w * n_h)\n ).cuda()\n mask_op = torch.mul(weights_mask_op, mask_op)\n mask_op = F.fold(\n mask_op,\n output_size=(W_pad, H_pad),\n kernel_size=(kernel_size, kernel_size),\n stride=(stride, stride),\n )\n weights_mask_op = F.fold(\n weights_mask_op,\n output_size=(W_pad, H_pad),\n kernel_size=(kernel_size, kernel_size),\n stride=(stride, stride),\n )\n mask_op = torch.div(mask_op, weights_mask_op)\n # op = op.view(C_out, n_w, n_h, w, h)\n # mask_op = mask_op.view(C_mask_out, n_w, n_h, w, h)\n\n # output_h = n_w * w\n # output_w = n_h * h\n # op = op.permute(0, 1, 3, 2, 4).contiguous()\n # mask_op = mask_op.permute(0, 1, 3, 2, 4).contiguous()\n\n # op = op.view(C_out, output_h, output_w)\n # mask_op = mask_op.view(C_mask_out, output_h, output_w)\n\n output = torch.clamp(op, 0.0, 1.0)\n mask_op_softmax = (\n mask_op[:, :, :W, 
:H].squeeze(0).cpu().numpy().transpose(1, 2, 0)\n )\n mask_op = mask_op.argmax(dim=1).unsqueeze(1)\n output = output[:, :, :W, :H]\n mask_output = mask_op[:, :, :W, :H]\n return mask_output, output, mask_op_softmax\n\n def write_output_images(\n self,\n output,\n target,\n output_mask,\n target_mask,\n image_name,\n preprocess_step,\n preprocess_stats,\n magnification,\n ):\n image_save_folder = os.path.join(self.image_folder, f\"{magnification}_images\")\n if not os.path.exists(image_save_folder):\n os.makedirs(image_save_folder)\n mask_save_folder = os.path.join(self.image_folder, f\"{magnification}_images\")\n if not os.path.exists(mask_save_folder):\n os.makedirs(mask_save_folder)\n\n if preprocess_step == \"normalize\":\n min = preprocess_stats[0].cuda()\n max = preprocess_stats[1].cuda()\n output = (\n ((max - min) * output + min)\n .cpu()\n .numpy()\n .transpose(1, 2, 0)\n .astype(np.uint16)\n )\n target = (\n ((max - min) * target + min)\n .cpu()\n .numpy()\n .transpose(1, 2, 0)\n .astype(np.uint16)\n )\n elif preprocess_step == \"standardize\":\n mean = preprocess_stats[0].cuda()\n std = preprocess_stats[1].cuda()\n output = (\n ((output * std) + mean).cpu().clamp(0, 65535).numpy().transpose(1, 2, 0).astype(np.uint16)\n )\n target = (\n ((target * std) + mean).cpu().clamp(0, 65535).numpy().transpose(1, 2, 0).astype(np.uint16)\n )\n else:\n output = (output * 65535).clamp(0, 65535).cpu().numpy().transpose(1, 2, 0).astype(np.uint16)\n target = (target * 65535).clamp(0, 65535).cpu().numpy().transpose(1, 2, 0).astype(np.uint16)\n for i, filename in enumerate(image_name):\n cv2.imwrite(\n os.path.join(image_save_folder, f\"{filename}\"), output[:, :, i],\n )\n # cv2.imwrite(\n # os.path.join(mask_save_folder, f\"mask_{filename}\"),\n # (output_mask.cpu().numpy().transpose(1, 2, 0)[:, :, i] * 65535).astype(\n # np.uint16\n # ),\n # )\n return output, target\n\n\ndef compute_pyramid_patch_weight_loss(width: int, height: int) -> np.ndarray:\n \"\"\"Compute a weight matrix that assigns bigger weight on pixels in center and\n less weight to pixels on image boundary.\n This weight matrix then used for merging individual tile predictions and helps dealing\n with prediction artifacts on tile boundaries.\n :param width: Tile width\n :param height: Tile height\n :return: Since-channel image [Width x Height]\n \"\"\"\n xc = width * 0.5\n yc = height * 0.5\n xl = 0\n xr = width\n yb = 0\n yt = height\n Dc = np.zeros((width, height))\n De = np.zeros((width, height))\n\n Dcx = np.square(np.arange(width) - xc + 0.5)\n Dcy = np.square(np.arange(height) - yc + 0.5)\n Dc = np.sqrt(Dcx[np.newaxis].transpose() + Dcy)\n\n De_l = np.square(np.arange(width) - xl + 0.5) + np.square(0.5)\n De_r = np.square(np.arange(width) - xr + 0.5) + np.square(0.5)\n De_b = np.square(0.5) + np.square(np.arange(height) - yb + 0.5)\n De_t = np.square(0.5) + np.square(np.arange(height) - yt + 0.5)\n\n De_x = np.sqrt(np.minimum(De_l, De_r))\n De_y = np.sqrt(np.minimum(De_b, De_t))\n De = np.minimum(De_x[np.newaxis].transpose(), De_y)\n\n alpha = (width * height) / np.sum(np.divide(De, np.add(Dc, De)))\n W = alpha * np.divide(De, np.add(Dc, De))\n return W, Dc, De\n"
] | [
[
"numpy.minimum",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.sum",
"pandas.DataFrame",
"torch.no_grad",
"numpy.mean",
"torch.logical_and",
"torch.nn.L1Loss",
"numpy.square",
"numpy.arange",
"torch.from_numpy",
"numpy.std",
"torch.mul",
"numpy.zeros",
"torch.nn.functional.pad",
"torch.div",
"numpy.median",
"torch.nn.DataParallel",
"torch.logical_or",
"torch.nn.functional.fold",
"numpy.add",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
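The compute_pyramid_patch_weight_loss helper at the end of the entry above is self-contained NumPy and can be inspected in isolation. A sketch, assuming the module (and its torch/cv2/scikit-image imports) is importable as brevis.test.lupi_tester:

    from brevis.test.lupi_tester import compute_pyramid_patch_weight_loss

    W, Dc, De = compute_pyramid_patch_weight_loss(256, 256)
    print(W.shape)                # (256, 256) blending weights for one tile
    print(W[128, 128] > W[0, 0])  # True: centre pixels outweigh corners when merging tile predictions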
shkarupa-alex/tfswin | [
"ba9f5c8bb4848bb07da1758eb3b22c2d86df8607"
] | [
"tfswin/tests/test_winatt.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom keras import keras_parameterized, layers\nfrom keras.utils.generic_utils import register_keras_serializable\nfrom tfswin.winatt import WindowAttention\nfrom testing_utils import layer_multi_io_test\n\n\n@register_keras_serializable('TFSwin')\nclass WindowAttentionSqueeze(WindowAttention):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.input_spec = self.input_spec[:-1] + [layers.InputSpec(ndim=1, dtype='bool')]\n\n def build(self, input_shape):\n super().build(input_shape)\n self.input_spec = self.input_spec[:-1] + [layers.InputSpec(ndim=1, dtype='bool')]\n\n def call(self, inputs, **kwargs):\n inputs, mask, with_mask = inputs\n\n return super().call([inputs, mask, tf.squeeze(with_mask, axis=0)], **kwargs)\n\n\n@keras_parameterized.run_all_keras_modes\nclass TestWindowAttention(keras_parameterized.TestCase):\n def test_layer(self):\n inputs = 10 * np.random.random((1, 49, 96)) - 0.5\n masks = 10 * np.random.random((1, 1, 1, 49, 49)) - 0.5\n\n layer_multi_io_test(\n WindowAttentionSqueeze,\n kwargs={'window_size': 7, 'num_heads': 3, 'qkv_bias': True, 'qk_scale': None, 'attn_drop': 0.,\n 'proj_drop': 0.},\n input_datas=[inputs, masks, np.array([False])],\n input_dtypes=['float32', 'float32', 'bool'],\n expected_output_shapes=[(None, 49, 96)],\n expected_output_dtypes=['float32']\n )\n layer_multi_io_test(\n WindowAttentionSqueeze,\n kwargs={'window_size': 7, 'num_heads': 3, 'qkv_bias': True, 'qk_scale': None, 'attn_drop': 0.,\n 'proj_drop': 0.},\n input_datas=[inputs, masks, np.array([True])],\n input_dtypes=['float32', 'float32', 'bool'],\n expected_output_shapes=[(None, 49, 96)],\n expected_output_dtypes=['float32']\n )\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"numpy.array",
"tensorflow.squeeze",
"numpy.random.random",
"tensorflow.test.main"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ekzhu/nserc-subjects | [
"2964715bdb1cd5ab07e18f7c09f8ad73e25ab00a"
] | [
"mlp.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import metrics\nfrom sklearn.model_selection import GridSearchCV\nfrom keras.preprocessing.text import one_hot, Tokenizer\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.utils import to_categorical\n\nprint(\"=== Get Raw Data\")\ndatafiles = [\"./raw/NSERC_GRT_FYR2016_AWARD.csv\",\n \"./raw/NSERC_GRT_FYR2015_AWARD.csv\",\n \"./raw/NSERC_GRT_FYR2014_AWARD.csv\",\n \"./raw/NSERC_GRT_FYR2013_AWARD.csv\",\n \"./raw/NSERC_GRT_FYR2012_AWARD.csv\",\n \"./raw/NSERC_GRT_FYR2011_AWARD.csv\",\n \"./raw/NSERC_GRT_FYR2010_AWARD.csv\",\n \"./raw/NSERC_GRT_FYR2009_AWARD.csv\",\n \"./raw/NSERC_GRT_FYR2008_AWARD.csv\",\n \"./raw/NSERC_GRT_FYR2007_AWARD.csv\",\n ]\nprint(\"Using data files:\")\ndfs = []\nfor datafile in datafiles:\n print(\"- \" + datafile)\n df = pd.read_csv(datafile, index_col=False, engine='python')\n dfs.append(df)\ndf = pd.concat(dfs, ignore_index=True)\nprint(\"Available Columns:\")\nfor col_name in df.columns:\n print(\"* \" + col_name)\n\nprint(\"=== Preprocess Data\")\ndata_name = 'ApplicationSummary'\ntarget_name = 'ResearchSubjectGroupEN'\ndf = df[[data_name, target_name]]\ndf = df.loc[df[data_name] != \"No summary - Aucun sommaire\"].loc[pd.notnull(df[data_name])]\nprint(\"Use %s as data and %s as target\" % (data_name, target_name))\ndf = shuffle(df)\ndata = [one_hot(text, 10000) for text in df[data_name]]\ntarget = df[target_name]\ntarget_names = target\nle = LabelEncoder()\ntarget = le.fit_transform(target)\nprint(\"Target classes:\")\nfor name in le.classes_:\n print(\"* \" + name)\nnum_classes = np.max(target) + 1\nprint(\"Number of classes:\", num_classes)\ntest_split = 0.05\nprint(\"Set test split to %.2f\" % (test_split))\nnum_test_cases = int(len(target)*test_split)\ndata_train, target_train = data[num_test_cases:], target[num_test_cases:]\ndata_test, target_test = data[:num_test_cases], target[:num_test_cases]\ntarget_names_test = target_names[:num_test_cases]\nprint(\"Number of traning cases: %d\" % (len(data_train)))\nprint(\"Number of test cases: %d\" % (len(data_test)))\n\nmax_words = 1000\ntokenizer = Tokenizer(num_words=max_words)\ndata_train = tokenizer.sequences_to_matrix(data_train, mode='binary')\ndata_test = tokenizer.sequences_to_matrix(data_test, mode='binary')\nprint(\"data_train shape:\", data_train.shape)\nprint(\"data_test shape:\", data_test.shape)\ntarget_train = to_categorical(target_train, num_classes)\ntarget_test = to_categorical(target_test, num_classes)\nprint(\"target_train shape:\", target_train.shape)\nprint(\"target_test shape:\", target_test.shape)\n\nprint(\"=== Building model\")\nmodel = Sequential()\nmodel.add(Dense(512, input_shape=(max_words,)))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes))\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\nhistory = model.fit(data_train, target_train,\n batch_size=32,\n epochs=100,\n verbose=1,\n validation_split=0.1)\nprint()\nprint(\"== Evalaute\")\nscore = model.evaluate(data_test, target_test,\n batch_size=32,\n verbose=1)\nprint(\"== Evalaute\")\nprint(\"Test score:\", 
score[0])\nprint(\"Test accuracy:\", score[1])\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.notnull",
"sklearn.utils.shuffle",
"numpy.max",
"sklearn.preprocessing.LabelEncoder"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
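A compact sketch of the hashing-trick preprocessing used in the entry above (one_hot followed by Tokenizer.sequences_to_matrix); the example documents are invented stand-ins for award summaries:

    from keras.preprocessing.text import one_hot, Tokenizer

    docs = ["quantum materials for energy storage",
            "machine learning methods for genomics"]
    seqs = [one_hot(d, 10000) for d in docs]          # hash each word to an index in [1, 10000)
    tok = Tokenizer(num_words=1000)
    x = tok.sequences_to_matrix(seqs, mode="binary")  # bag-of-words matrix of shape (2, 1000)
    print(x.shape)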
janfrancu/gnn-model-explainer | [
"2f86bad871594842b68f47b9ede51180c645c47a"
] | [
"generate_synth_graphs.py"
] | [
"import os\nimport numpy as np\nimport gengraph\nimport utils.featgen as featgen\nimport json\n\nfrom networkx.readwrite import json_graph, write_gexf\n\n\ndef syn_task1(input_dim=10, seed=0):\n return gengraph.gen_syn1(\n feature_generator=featgen.ConstFeatureGen(np.ones(input_dim, dtype=float)), seed=seed \n )\n\ndef syn_task2(seed=0):\n return gengraph.gen_syn2(seed=seed)\n\ndef syn_task3(input_dim=10, seed=0):\n return gengraph.gen_syn3(\n feature_generator=featgen.ConstFeatureGen(np.ones(input_dim, dtype=float)), seed=seed\n )\n\ndef syn_task4(input_dim=10, seed=0):\n return gengraph.gen_syn4(\n feature_generator=featgen.ConstFeatureGen(np.ones(input_dim, dtype=float)), seed=seed\n )\n\ndef syn_task5(input_dim=10, seed=0):\n return gengraph.gen_syn5(\n feature_generator=featgen.ConstFeatureGen(np.ones(input_dim, dtype=float)), seed=seed\n )\n\ndef convert_to_json(G, labels, name):\n # convert numpy arrays to float and add label information\n # part of the label list is filled with np.int64\n for i, l in enumerate(labels):\n G.nodes[i]['feat'] = G.nodes[i]['feat'].tolist()\n G.nodes[i]['label'] = int(l)\n\n data = json_graph.node_link_data(G)\n\n # convert all node ids from int64 to int\n for i in range(len(data['nodes'])):\n data['nodes'][i]['id'] = int(data['nodes'][i]['id'])\n\n # convert all links ids from int64 to int\n for i in range(len(data['links'])):\n data['links'][i]['source'] = int(data['links'][i]['source'])\n data['links'][i]['target'] = int(data['links'][i]['target'])\n\n # node link JSON\n with open(name + '.json', 'w') as file:\n file.write(json.dumps(data))\n\n# comes from `train_node_classifier` method\ndef store_traintest_split(G, train_ratio, seed):\n num_nodes = G.number_of_nodes()\n num_train = int(num_nodes * train_ratio)\n idx = [i for i in range(num_nodes)]\n\n rng = np.random.default_rng(seed)\n rng.shuffle(idx)\n train_idx = idx[:num_train]\n # print(train_idx[:10])\n\n for n in train_idx:\n G.nodes[n]['split'] = \"train\"\n\n return G\n\n\noutdir = \"./synth_graphs\"\nif not os.path.isdir(outdir):\n os.mkdir(outdir)\n\ntrain_ratio = 0.8\nfor seed in range(20):\n for i in range(5):\n fun = 'syn_task' + str(i+1)\n G, labels, name = eval(fun)(seed=seed)\n store_traintest_split(G, train_ratio, seed)\n l = np.array(labels)\n # print(np.nonzero(l > 0))\n convert_to_json(G, labels, 'synth_graphs/' + fun + '_' + name + '_' + str(seed))\n\n\n### check reproducibility\n# import networkx as nx\n\n# for i in range(5):\n# fun = 'syn_task' + str(i+1)\n\n# G1, l1, _ = eval(fun)(seed=1)\n# G2, l2, _ = eval(fun)(seed=1)\n\n# d1, d2 = gengraph.preprocess_input_graph(G1, l1), gengraph.preprocess_input_graph(G2, l2)\n\n# print(np.all(d1[\"adj\"] == d2[\"adj\"]))\n# print(np.all(d1[\"feat\"] == d2[\"feat\"]))\n# print(np.all(d1[\"labels\"] == d2[\"labels\"]))\n\n# for i in range(5):\n# fun = 'syn_task' + str(i+1)\n\n# G1, l1, _ = eval(fun)(seed=1)\n# G2, l2, _ = eval(fun)(seed=2)\n\n# d1, d2 = gengraph.preprocess_input_graph(G1, l1), gengraph.preprocess_input_graph(G2, l2)\n\n# print(np.all(d1[\"adj\"] == d2[\"adj\"]))\n# print(np.all(d1[\"feat\"] == d2[\"feat\"]))\n# print(np.all(d1[\"labels\"] == d2[\"labels\"]))"
] | [
[
"numpy.array",
"numpy.random.default_rng",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cj-mclaughlin/SuperPoint | [
"3806a1dfb4e95b1cc0ad27ef95c40877f35633dc"
] | [
"superpoint/experiment.py"
] | [
"import logging\nimport yaml\nimport os\nimport argparse\nimport numpy as np\nfrom contextlib import contextmanager\nfrom json import dumps as pprint\n\nfrom superpoint.datasets import get_dataset\nfrom superpoint.models import get_model\nfrom superpoint.utils.stdout_capturing import capture_outputs\nfrom superpoint.settings import EXPER_PATH\n\nlogging.basicConfig(format='[%(asctime)s %(levelname)s] %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior() # noqa: E402\n\n\ndef train(config, n_iter, output_dir, pretrained_dir=None,\n checkpoint_name='model.ckpt'):\n checkpoint_path = os.path.join(output_dir, checkpoint_name)\n with _init_graph(config) as net:\n if pretrained_dir is not None:\n net.load(pretrained_dir)\n try:\n net.train(n_iter, output_dir=output_dir,\n validation_interval=config.get('validation_interval', 100),\n save_interval=config.get('save_interval', None),\n checkpoint_path=checkpoint_path,\n keep_checkpoints=config.get('keep_checkpoints', 1))\n except KeyboardInterrupt:\n logging.info('Got Keyboard Interrupt, saving model and closing.')\n net.save(os.path.join(output_dir, checkpoint_name))\n\n\ndef evaluate(config, output_dir, n_iter=None):\n with _init_graph(config) as net:\n net.load(output_dir)\n results = net.evaluate(config.get('eval_set', 'test'), max_iterations=n_iter)\n return results\n\n\ndef predict(config, output_dir, n_iter):\n pred = []\n data = []\n with _init_graph(config, with_dataset=True) as (net, dataset):\n if net.trainable:\n net.load(output_dir)\n test_set = dataset.get_test_set()\n for _ in range(n_iter):\n data.append(next(test_set))\n pred.append(net.predict(data[-1], keys='*'))\n return pred, data\n\n\ndef set_seed(seed):\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n\ndef get_num_gpus():\n return len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))\n\n\n@contextmanager\ndef _init_graph(config, with_dataset=False):\n set_seed(config.get('seed', int.from_bytes(os.urandom(4), byteorder='big')))\n n_gpus = 1 #get_num_gpus()\n logging.info('Number of GPUs detected: {}'.format(n_gpus))\n\n dataset = get_dataset(config['data']['name'])(**config['data'])\n model = get_model(config['model']['name'])(\n data={} if with_dataset else dataset.get_tf_datasets(),\n n_gpus=n_gpus, **config['model'])\n model.__enter__()\n if with_dataset:\n yield model, dataset\n else:\n yield model\n model.__exit__()\n tf.reset_default_graph()\n\n\ndef _cli_train(config, output_dir, args):\n assert 'train_iter' in config\n\n with open(os.path.join(output_dir, 'config.yml'), 'w') as f:\n yaml.dump(config, f, default_flow_style=False)\n \n if args.pretrained_model is not None:\n pretrained_dir = os.path.join(EXPER_PATH, args.pretrained_model)\n if not os.path.exists(pretrained_dir):\n raise ValueError(\"Missing pretrained model: \" + pretrained_dir)\n else:\n pretrained_dir = None\n \n train(config, config['train_iter'], output_dir, pretrained_dir)\n\n if args.eval:\n _cli_eval(config, output_dir, args)\n\n\ndef _cli_eval(config, output_dir, args):\n # Load model config from previous experiment\n with open(os.path.join(output_dir, 'config.yml'), 'r') as f:\n model_config = yaml.load(f)['model']\n model_config.update(config.get('model', {}))\n config['model'] = model_config\n\n results = evaluate(config, output_dir, n_iter=config.get('eval_iter'))\n\n # Print and export results\n logging.info('Evaluation results: \\n{}'.format(\n pprint(results, indent=2, default=str)))\n with 
open(os.path.join(output_dir, 'eval.txt'), 'a') as f:\n f.write('Evaluation for {} dataset:\\n'.format(config['data']['name']))\n for r, v in results.items():\n f.write('\\t{}:\\n\\t\\t{}\\n'.format(r, v))\n f.write('\\n')\n\n\n# TODO\ndef _cli_pred(config, args):\n raise NotImplementedError\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='command')\n\n # Training command\n p_train = subparsers.add_parser('train')\n p_train.add_argument('config', type=str)\n p_train.add_argument('exper_name', type=str)\n p_train.add_argument('--eval', action='store_true')\n p_train.add_argument('--pretrained_model', type=str, default=None)\n p_train.set_defaults(func=_cli_train)\n\n # Evaluation command\n p_train = subparsers.add_parser('evaluate')\n p_train.add_argument('config', type=str)\n p_train.add_argument('exper_name', type=str)\n p_train.set_defaults(func=_cli_eval)\n\n # Inference command\n p_train = subparsers.add_parser('predict')\n p_train.add_argument('config', type=str)\n p_train.add_argument('exper_name', type=str)\n p_train.set_defaults(func=_cli_pred)\n\n args = parser.parse_args()\n with open(args.config, 'r') as f:\n config = yaml.load(f)\n output_dir = os.path.join(EXPER_PATH, args.exper_name)\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n with capture_outputs(os.path.join(output_dir, 'log')):\n logging.info('Running command {}'.format(args.command.upper()))\n args.func(config, output_dir, args)\n"
] | [
[
"tensorflow.compat.v1.set_random_seed",
"tensorflow.compat.v1.reset_default_graph",
"numpy.random.seed",
"tensorflow.compat.v1.disable_v2_behavior"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
forki/Labs | [
"403ee5e05fcd280bf8123b874a7e9bb2b6dc390c"
] | [
"RegressionLab/RegressionLab/regression.py"
] | [
"'''\nThis script perfoms the basic process for applying a machine learning\nalgorithm to a dataset using Python libraries.\n\nThe four steps are:\n 1. Download a dataset (using pandas)\n 2. Process the numeric data (using numpy)\n 3. Train and evaluate learners (using scikit-learn)\n 4. Plot and compare results (using matplotlib)\n\n\nThe data is downloaded from URL, which is defined below. As is normal\nfor machine learning problems, the nature of the source data affects\nthe entire solution. When you change URL to refer to your own data, you\nwill need to review the data processing steps to ensure they remain\ncorrect.\n\n============\nExample Data\n============\nThe example is from http://mldata.org/repository/data/viewslug/stockvalues/\nIt contains stock prices and the values of three indices for each day\nover a five year period. See the linked page for more details about\nthis data set.\n\nThis script uses regression learners to predict the stock price for\nthe second half of this period based on the values of the indices. This\nis a naive approach, and a more robust method would use each prediction\nas an input for the next, and would predict relative rather than\nabsolute values.\n'''\n\n# Remember to update the script for the new data when you change this URL\nURL = \"http://mldata.org/repository/data/download/csv/stockvalues/\"\n\n# This is the column of the sample data to predict.\n# Try changing it to other integers between 1 and 155.\nTARGET_COLUMN = 32\n\n# Uncomment this call when using matplotlib to generate images\n# rather than displaying interactive UI.\n#import matplotlib\n#matplotlib.use('Agg')\n\nfrom pandas import read_table\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntry:\n # [OPTIONAL] Seaborn makes plots nicer\n import seaborn\nexcept ImportError:\n pass\n\n# =====================================================================\n\ndef download_data():\n '''\n Downloads the data for this script into a pandas DataFrame.\n '''\n\n # If your data is in an Excel file, install 'xlrd' and use\n # pandas.read_excel instead of read_table\n #from pandas import read_excel\n #frame = read_excel(URL)\n\n # If your data is in a private Azure blob, install 'azure' and use\n # BlobService.get_blob_to_path() with read_table() or read_excel()\n #import azure.storage\n #service = azure.storage.BlobService(ACCOUNT_NAME, ACCOUNT_KEY)\n #service.get_blob_to_path(container_name, blob_name, 'my_data.csv')\n #frame = read_table('my_data.csv', ...\n\n frame = read_table(\n URL,\n \n # Uncomment if the file needs to be decompressed\n #compression='gzip',\n #compression='bz2',\n\n # Specify the file encoding\n # Latin-1 is common for data from US sources\n encoding='latin-1',\n #encoding='utf-8', # UTF-8 is also common\n\n # Specify the separator in the data\n sep=',', # comma separated values\n #sep='\\t', # tab separated values\n #sep=' ', # space separated values\n\n # Ignore spaces after the separator\n skipinitialspace=True,\n\n # Generate row labels from each row number\n index_col=None,\n #index_col=0, # use the first column as row labels\n #index_col=-1, # use the last column as row labels\n\n # Generate column headers row from each column number\n header=None,\n #header=0, # use the first line as headers\n\n # Use manual headers and skip the first row in the file\n #header=0,\n #names=['col1', 'col2', ...],\n )\n\n # Return the entire frame\n #return frame\n\n # Return a subset of the columns\n return frame[[156, 157, 158, TARGET_COLUMN]]\n\n\n# 
=====================================================================\n\n\ndef get_features_and_labels(frame):\n '''\n Transforms and scales the input data and returns numpy arrays for\n training and testing inputs and targets.\n '''\n\n # Replace missing values with 0.0\n # or we can use scikit-learn to calculate missing values below\n #frame[frame.isnull()] = 0.0\n\n # Convert values to floats\n arr = np.array(frame, dtype=np.float)\n\n # Normalize the entire data set\n from sklearn.preprocessing import StandardScaler, MinMaxScaler\n arr = MinMaxScaler().fit_transform(arr)\n\n # Use the last column as the target value\n X, y = arr[:, :-1], arr[:, -1]\n # To use the first column instead, change the index value\n #X, y = arr[:, 1:], arr[:, 0]\n \n # Use 50% of the data for training, but we will test against the\n # entire set\n from sklearn.cross_validation import train_test_split\n X_train, _, y_train, _ = train_test_split(X, y, test_size=0.5)\n X_test, y_test = X, y\n \n # If values are missing we could impute them from the training data\n #from sklearn.preprocessing import Imputer\n #imputer = Imputer(strategy='mean')\n #imputer.fit(X_train)\n #X_train = imputer.transform(X_train)\n #X_test = imputer.transform(X_test)\n \n # Normalize the attribute values to mean=0 and variance=1\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n # To scale to a specified range, use MinMaxScaler\n #from sklearn.preprocessing import MinMaxScaler\n #scaler = MinMaxScaler(feature_range=(0, 1))\n \n # Fit the scaler based on the training data, then apply the same\n # scaling to both training and test sets.\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n\n # Return the training and test sets\n return X_train, X_test, y_train, y_test\n\n\n# =====================================================================\n\n\ndef evaluate_learner(X_train, X_test, y_train, y_test):\n '''\n Run multiple times with different algorithms to get an idea of the\n relative performance of each configuration.\n\n Returns a sequence of tuples containing:\n (title, expected values, actual values)\n for each learner.\n '''\n\n # Use a support vector machine for regression\n from sklearn.svm import SVR\n\n # Train using a radial basis function\n svr = SVR(kernel='rbf', gamma=0.1)\n svr.fit(X_train, y_train)\n y_pred = svr.predict(X_test)\n r_2 = svr.score(X_test, y_test)\n yield 'RBF Model ($R^2={:.3f}$)'.format(r_2), y_test, y_pred\n\n # Train using a linear kernel\n svr = SVR(kernel='linear')\n svr.fit(X_train, y_train)\n y_pred = svr.predict(X_test)\n r_2 = svr.score(X_test, y_test)\n yield 'Linear Model ($R^2={:.3f}$)'.format(r_2), y_test, y_pred\n\n # Train using a polynomial kernel\n svr = SVR(kernel='poly', degree=2)\n svr.fit(X_train, y_train)\n y_pred = svr.predict(X_test)\n r_2 = svr.score(X_test, y_test)\n yield 'Polynomial Model ($R^2={:.3f}$)'.format(r_2), y_test, y_pred\n\n\n# =====================================================================\n\n\ndef plot(results):\n '''\n Create a plot comparing multiple learners.\n\n `results` is a list of tuples containing:\n (title, expected values, actual values)\n \n All the elements in results will be plotted.\n '''\n\n # Using subplots to display the results on the same X axis\n fig, plts = plt.subplots(nrows=len(results), figsize=(8, 8))\n fig.canvas.set_window_title('Predicting data from ' + URL)\n\n # Show each element in the plots returned from plt.subplots()\n for subplot, (title, y, y_pred) 
in zip(plts, results):\n # Configure each subplot to have no tick marks\n # (these are meaningless for the sample dataset)\n subplot.set_xticklabels(())\n subplot.set_yticklabels(())\n\n # Label the vertical axis\n subplot.set_ylabel('stock price')\n\n # Set the title for the subplot\n subplot.set_title(title)\n\n # Plot the actual data and the prediction\n subplot.plot(y, 'b', label='actual')\n subplot.plot(y_pred, 'r', label='predicted')\n \n # Shade the area between the predicted and the actual values\n subplot.fill_between(\n # Generate X values [0, 1, 2, ..., len(y)-2, len(y)-1]\n np.arange(0, len(y), 1),\n y,\n y_pred,\n color='r',\n alpha=0.2\n )\n\n # Mark the extent of the training data\n subplot.axvline(len(y) // 2, linestyle='--', color='0', alpha=0.2)\n\n # Include a legend in each subplot\n subplot.legend()\n\n # Let matplotlib handle the subplot layout\n fig.tight_layout()\n\n # ==================================\n # Display the plot in interactive UI\n plt.show()\n\n # To save the plot to an image file, use savefig()\n #plt.savefig('plot.png')\n\n # Open the image file with the default image viewer\n #import subprocess\n #subprocess.Popen('plot.png', shell=True)\n\n # To save the plot to an image in memory, use BytesIO and savefig()\n # This can then be written to any stream-like object, such as a\n # file or HTTP response.\n #from io import BytesIO\n #img_stream = BytesIO()\n #plt.savefig(img_stream, fmt='png')\n #img_bytes = img_stream.getvalue()\n #print('Image is {} bytes - {!r}'.format(len(img_bytes), img_bytes[:8] + b'...'))\n\n # Closing the figure allows matplotlib to release the memory used.\n plt.close()\n\n\n# =====================================================================\n\n\nif __name__ == '__main__':\n # Download the data set from URL\n print(\"Downloading data from {}\".format(URL))\n frame = download_data()\n\n # Process data into feature and label arrays\n print(\"Processing {} samples with {} attributes\".format(len(frame.index), len(frame.columns)))\n X_train, X_test, y_train, y_test = get_features_and_labels(frame)\n\n # Evaluate multiple regression learners on the data\n print(\"Evaluating regression learners\")\n results = list(evaluate_learner(X_train, X_test, y_train, y_test))\n\n # Display the results\n print(\"Plotting the results\")\n plot(results)\n"
] | [
[
"sklearn.cross_validation.train_test_split",
"sklearn.svm.SVR",
"pandas.read_table",
"matplotlib.pyplot.close",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Youngermaster/Scikit-Learn-Platzi | [
"299b3bf8af106cb8e99ffe6b9e953a3948532599"
] | [
"regularization.py"
] | [
"import pandas as pd\nimport sklearn\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import Ridge\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\n\nif __name__ == \"__main__\":\n dataset = pd.read_csv('./datasets/whr2017.csv')\n print(dataset.describe())\n\n X = dataset[['gdp', 'family', 'lifexp', 'freedom',\n 'corruption', 'generosity', 'dystopia']]\n y = dataset[['score']]\n\n print(X.shape)\n print(y.shape)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)\n\n modelLinear = LinearRegression().fit(X_train, y_train)\n y_predict_linear = modelLinear.predict(X_test)\n\n modelLasso = Lasso(alpha=0.02).fit(X_train, y_train)\n y_predict_lasso = modelLasso.predict(X_test)\n\n modelRidge = Ridge(alpha=1).fit(X_train, y_train)\n y_predict_ridge = modelRidge.predict(X_test)\n\n linear_loss = mean_squared_error(y_test, y_predict_linear)\n print(\"Linear Loss:\", linear_loss)\n\n lasso_loss = mean_squared_error(y_test, y_predict_lasso)\n print(\"Lasso Loss: \", lasso_loss)\n\n ridge_loss = mean_squared_error(y_test, y_predict_ridge)\n print(\"Ridge Loss: \", ridge_loss)\n\n print(\"=\"*32)\n print(\"Coef LASSO\")\n print(modelLasso.coef_)\n\n print(\"=\"*32)\n print(\"Coef RIDGE\")\n print(modelRidge.coef_)\n"
] | [
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.Lasso",
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.Ridge",
"sklearn.linear_model.LinearRegression"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
tansyab1/LightNetPlus | [
"ed226e5454b2144063a6d8132b07c90e6a64e2d3"
] | [
"modules/dropout.py"
] | [
"import torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass DropBlock2D(nn.Module):\n r\"\"\"Randomly zeroes spatial blocks of the input tensor.\n As described in the paper\n `DropBlock: A regularization method for convolutional networks`_ ,\n dropping whole blocks of feature map allows to remove semantic\n information as compared to regular dropout.\n Args:\n keep_prob (float, optional): probability of an element to be kept.\n Authors recommend to linearly decrease this value from 1 to desired\n value.\n block_size (int, optional): size of the block. Block size in paper\n usually equals last feature map dimensions.\n Shape:\n - Input: :math:`(N, C, H, W)`\n - Output: :math:`(N, C, H, W)` (same shape as input)\n .. _DropBlock: A regularization method for convolutional networks:\n https://arxiv.org/abs/1810.12890\n \"\"\"\n\n def __init__(self, keep_prob=0.9, block_size=7):\n super(DropBlock2D, self).__init__()\n self.keep_prob = keep_prob\n self.block_size = block_size\n\n def forward(self, input):\n if not self.training or self.keep_prob == 1:\n return input\n gamma = (1. - self.keep_prob) / self.block_size ** 2\n for sh in input.shape[2:]:\n gamma *= sh / (sh - self.block_size + 1)\n M = torch.bernoulli(torch.ones_like(input) * gamma)\n Msum = F.conv2d(M,\n torch.ones((input.shape[1], 1, self.block_size, self.block_size)).to(device=input.device,\n dtype=input.dtype),\n padding=self.block_size // 2,\n groups=input.shape[1])\n torch.set_printoptions(threshold=5000)\n mask = (Msum < 1).to(device=input.device, dtype=input.dtype)\n return input * mask * mask.numel() / mask.sum() # TODO input * mask * self.keep_prob ?\n"
] | [
[
"torch.set_printoptions",
"torch.ones",
"torch.ones_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yaoyiran/ContrastiveBLI | [
"9df31a66ffd9c6eea60c94b7e224e759ab6628c3"
] | [
"C2/src/data_loader.py"
] | [
"import re\nimport os\nimport glob\nimport numpy as np\nimport random\nimport pandas as pd\nimport json\nfrom torch.utils.data import Dataset\nimport logging\nfrom tqdm import tqdm\nimport torch\nLOGGER = logging.getLogger(__name__)\n\n\n\ndef erase_and_mask(s, erase_len=5):\n if len(s) <= erase_len: return s\n if len(s) < 20: return s \n ind = np.random.randint(len(s)-erase_len)\n left, right = s.split(s[ind:ind+erase_len], 1)\n return \" \".join([left, \"[MASK]\", right])\n \n\nclass C2_Dataset(Dataset):\n def __init__(self, path, l1, l2, l1_voc, l2_voc, tokenizer, random_erase=0, template = 0): \n with open(path, 'r') as f:\n lines = f.readlines()\n\n self.str2lang = {\"hr\":\"croatian\", \"en\":\"english\",\"fi\":\"finnish\",\"fr\":\"french\",\"de\":\"german\",\"it\":\"italian\",\"ru\":\"russian\",\"tr\":\"turkish\"}\n self.l1 = l1\n self.l2 = l2\n self.template = template\n self.my_template = \"the word '{}' in {}.\"\n self.l1_voc = np.load(l1_voc, allow_pickle=True).item()\n self.l2_voc = np.load(l2_voc, allow_pickle=True).item()\n\n self.query_ids = []\n self.query_names = []\n self.idxs = []\n for i,line in enumerate(lines):\n line = line.rstrip(\"\\n\")\n query_id, name1, name2 = line.split(\"|+|\")\n self.query_ids.append(query_id)\n name1_, name2_ = name1.split(), name2.split()\n if bool(self.template):\n name1_ = [self.my_template.format(w,self.str2lang[self.l1]) for w in name1_]\n name2_ = [self.my_template.format(w,self.str2lang[self.l2]) for w in name2_]\n \n self.query_names.append((name1_, name2_))\n self.idxs.append(i)\n\n self.tokenizer = tokenizer\n self.query_id_2_index_id = {k: v for v, k in enumerate(list(set(self.query_ids)))}\n self.random_erase = random_erase\n \n def __getitem__(self, query_idx):\n\n query_name1 = self.query_names[query_idx][0]\n query_name2 = self.query_names[query_idx][1]\n\n idx = self.idxs[query_idx]\n if self.random_erase != 0:\n query_name2 = erase_and_mask(query_name2, erase_len=int(self.random_erase))\n query_id = self.query_ids[query_idx]\n query_id = int(self.query_id_2_index_id[query_id])\n\n return query_name1, query_name2, query_id\n\n\n def __len__(self):\n return len(self.query_names)\n\n\n\n\n"
] | [
[
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Samuel-Nathanson/LeagueMusicPlayer | [
"4a6b45cd4d31a677b71e4e5297498ebd4a71bc15"
] | [
"sample.py"
] | [
"from riotwatcher import LolWatcher, ApiError\nimport pandas as pd\n\n# global variables \napi_key = 'RGAPI-7a2dab8c-d30a-4b75-a7bc-db7eb25dcf31'\n\nwatcher = LolWatcher(api_key)\nmy_region = 'na1'\n\nsummoner_names = ['jnw309', 'cadoo29', 'yolobadger', 'SI0N', 'Fr33 Smoke']\nsummoners = []\n\n# Max 100 Requests every 2 minutes - May need to self-throttle our API usage until we get a product development key\nfor summoner_name in summoner_names:\n\tsummoners.append(watcher.summoner.by_name(my_region, summoner_name))\n\tmatches = watcher.match.matchlist_by_account(my_region, summoners[-1]['accountId'])\n\tlast_match = matches['matches'][0]\n\tmatch_detail = watcher.match.by_id(my_region, last_match['gameId'])\n\n\tparticipants = []\n\tfor row in match_detail['participants']:\n\t\tparticipants_row = {}\n\t\tparticipants_row['champion'] = row['championId']\n\t\tparticipants_row['spell1'] = row['spell1Id']\n\t\tparticipants_row['spell2'] = row['spell2Id']\n\t\tparticipants_row['win'] = row['stats']['win']\n\t\tparticipants_row['kills'] = row['stats']['kills']\n\t\tparticipants_row['deaths'] = row['stats']['deaths']\n\t\tparticipants_row['assists'] = row['stats']['assists']\n\t\tparticipants_row['totalDamageDealt'] = row['stats']['totalDamageDealt']\n\t\tparticipants_row['goldEarned'] = row['stats']['goldEarned']\n\t\tparticipants_row['champLevel'] = row['stats']['champLevel']\n\t\tparticipants_row['totalMinionsKilled'] = row['stats']['totalMinionsKilled']\n\t\tparticipants_row['item0'] = row['stats']['item0']\n\t\tparticipants_row['item1'] = row['stats']['item1']\n\t\tparticipants.append(participants_row)\n\n\tdf = pd.DataFrame(participants)\n\tprint(df)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ErikGartner/actor | [
"a2d10671600088019cd1e14607dfb00eb71c4853"
] | [
"dataset/openpose-caffe/cache_panoptic.py"
] | [
"# From Python\n# It requires OpenCV installed for Python and OpenPose built with Python API support.\n# Tested for OpenPose ~1.4.0\nimport argparse\nimport glob\nimport json\nimport os\nimport sys\nimport time\nfrom sys import platform\n\nimport cv2\n\nimport matplotlib.pyplot as plt\nfrom scipy.io import savemat\nfrom tqdm import tqdm\nimport numpy as np\n\n\ndef to_panoptic_format(joints):\n bodies = []\n if len(joints[\"body\"].shape) == 0:\n return bodies\n\n for pose_id in range(joints[\"body\"].shape[0]):\n body = {\n \"id\": pose_id,\n \"joints25\": joints[\"body\"][pose_id, :, :].flatten().tolist(),\n \"face\": joints[\"face\"][pose_id, :, :].flatten().tolist(),\n \"left_hand\": joints[\"left_hand\"][pose_id, :, :].flatten().tolist(),\n \"right_hand\": joints[\"right_hand\"][pose_id, :, :]\n .flatten()\n .tolist(),\n }\n bodies.append(body)\n return bodies\n\n\ndef predict_joints(opWrapper, image):\n datum = op.Datum()\n imageToProcess = cv2.imread(image)\n if imageToProcess is None:\n print(image)\n datum.cvInputData = imageToProcess\n opWrapper.emplaceAndPop([datum])\n\n if (\n len(datum.poseKeypoints.shape) == 0\n and len(datum.faceKeypoints.shape) == 0\n and len(datum.handKeypoints[0].shape) == 0\n and len(datum.handKeypoints[1].shape) == 0\n ):\n return {\n \"body\": np.array([]),\n \"face\": np.array([]),\n \"left_hand\": np.array([]),\n \"right_hand\": np.array([]),\n }\n\n # print(\"Body keypoints: \\n\" + str(datum.poseKeypoints))\n # print(\"Face keypoints: \\n\" + str(datum.faceKeypoints))\n # print(\"Left hand keypoints: \\n\" + str(datum.handKeypoints[0]))\n # print(\"Right hand keypoints: \\n\" + str(datum.handKeypoints[1]))\n #\n return {\n \"body\": datum.poseKeypoints,\n \"face\": datum.faceKeypoints,\n \"left_hand\": datum.handKeypoints[0],\n \"right_hand\": datum.handKeypoints[1],\n }\n\n\ndef main(args):\n params = dict()\n params[\"model_folder\"] = os.path.join(args.openpose_path, \"models/\")\n params[\"face\"] = True\n params[\"hand\"] = True\n\n # Starting OpenPose\n opWrapper = op.WrapperPython()\n opWrapper.configure(params)\n opWrapper.start()\n\n scenes = sorted(glob.glob(os.path.join(args.dataset_path, \"*\")))\n for path in tqdm(scenes, desc=\"scene\"):\n cams = sorted(glob.glob(os.path.join(path, \"hdImgs\", \"*\")))\n\n for cam in tqdm(cams, desc=\"camera\"):\n scene_detections = []\n\n frames = sorted(glob.glob(os.path.join(cam, \"*\")))\n\n save_path = os.path.join(\n args.cache_path,\n \"openpose\",\n os.path.basename(path),\n os.path.basename(cam),\n \"joints2d_full\",\n )\n save_path_mat = save_path + \".mat\"\n save_path_json = save_path + \".json\"\n if (\n os.path.exists(save_path_mat)\n and os.path.exists(save_path_json)\n and not args.overwrite\n ):\n print(\"Already computed. 
Skipping!\")\n continue\n\n cam_detections = []\n for frame in tqdm(frames, desc=\"frame\"):\n image = frame\n joints = predict_joints(opWrapper, image)\n\n cam_detections.append(joints)\n scene_detections.append(cam_detections)\n\n # print(\"savepath\", save_path)\n savemat(save_path_mat, {\"poses\": scene_detections})\n\n # print(\"savepath\", save_path)\n with open(save_path_json, \"w\") as f:\n json.dump(\n [\n [to_panoptic_format(x) for x in z]\n for z in scene_detections\n ],\n f,\n indent=2,\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"dataset_path\", help=\"Path to dataset folder\")\n parser.add_argument(\n \"cache_path\", help=\"Path to cache folder to save detections to\"\n )\n parser.add_argument(\n \"--openpose_path\", help=\"Path to openpose\", default=\"openpose\"\n )\n parser.add_argument(\n \"--overwrite\", help=\"Overwrite existing cache\", action=\"store_true\"\n )\n args = parser.parse_args()\n\n try:\n # Import open pose\n sys.path.append(os.path.join(args.openpose_path, \"build\", \"python\"))\n from openpose import pyopenpose as op\n except ImportError as e:\n print(\n \"Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?\"\n )\n raise e\n\n main(args)\n"
] | [
[
"numpy.array",
"scipy.io.savemat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
YosefLab/Cassiopeia | [
"010072b307f7eadbf10dc4af8b2165e48f1736a7"
] | [
"cassiopeia/solver/ILPSolver.py"
] | [
"\"\"\"\nImplements the Cassiopeia-ILP solver, as described in Jones et al, Genome Biol\n2020. Briefly, this algorithm infers the maximum parsimony tree by solving for\na Steiner Tree over an inferred potential graph of potential intermediate\nevolutionary states.\n\"\"\"\nimport datetime\nimport logging\nimport time\nfrom typing import Dict, List, Optional\n\nimport hashlib\nimport itertools\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\n\nfrom cassiopeia.data import CassiopeiaTree\nfrom cassiopeia.data import utilities as data_utilities\nfrom cassiopeia.mixins import ILPSolverError, is_ambiguous_state, logger\nfrom cassiopeia.solver import (\n CassiopeiaSolver,\n dissimilarity_functions,\n ilp_solver_utilities,\n solver_utilities,\n)\n\n\nclass ILPSolver(CassiopeiaSolver.CassiopeiaSolver):\n \"\"\"\n The Cassiopeia ILP-based maximum parsimony solver.\n\n ILPSolver is a subclass of CassiopeiaSolver and implements the\n Cassiopeia-ILP algorithm described in Jones et al, Genome Biol 2020. The\n solver proceeds by constructing a tree over a network of possible\n evolutionary states known as the potential graph. The procedure for\n constructing this tree is done by solving for a Steiner Tree with an\n integer linear programming (ILP) optimization approach.\n\n Args:\n convergence_time_limit: Amount of time allotted to the ILP for\n convergence. Ignored if set to 0.\n convergence_iteration_limit: Number of iterations allowed for ILP\n convergence. Ignored if set to 0.\n maximum_potential_graph_layer_size: Maximum size allowed for an\n iteration of the potential graph inference procedure. If this is\n exceeded, we return the previous iteration's graph or abort\n altogether.\n maximum_potential_graph_lca_distance: Maximum height of LCA to add to the\n potential graph. If this parameter is not provided or the specified\n value is 0, the maximum distance between any pair of samples is used\n as the maximum lca height.\n weighted: Weight edges on the potential graph by the negative log\n likelihood of the mutations.\n seed: Random seed to use during ILP optimization.\n mip_gap: Objective gap for mixed integer linear programming problem.\n logfile: A file to log output to. This will contain information around\n the potential graph inference procedure as well as the Steiner Tree\n optimization.\n prior_transformation: Function to use when transforming priors into\n weights. 
Supports the following transformations:\n \"negative_log\": Transforms each probability by the negative\n log (default)\n \"inverse\": Transforms each probability p by taking 1/p\n \"square_root_inverse\": Transforms each probability by the\n the square root of 1/p\n \"\"\"\n\n def __init__(\n self,\n convergence_time_limit: int = 12600,\n convergence_iteration_limit: int = 0,\n maximum_potential_graph_layer_size: int = 10000,\n maximum_potential_graph_lca_distance: Optional[int] = None,\n weighted: bool = False,\n seed: Optional[int] = None,\n mip_gap: float = 0.01,\n prior_transformation: str = \"negative_log\",\n ):\n\n super().__init__(prior_transformation)\n self.convergence_time_limit = convergence_time_limit\n self.convergence_iteration_limit = convergence_iteration_limit\n self.maximum_potential_graph_layer_size = (\n maximum_potential_graph_layer_size\n )\n self.maximum_potential_graph_lca_distance = (\n maximum_potential_graph_lca_distance\n )\n self.weighted = weighted\n self.seed = seed\n self.mip_gap = mip_gap\n\n @logger.namespaced(\"ILPSolver\")\n def solve(\n self,\n cassiopeia_tree: CassiopeiaTree,\n layer: Optional[str] = None,\n collapse_mutationless_edges: bool = False,\n logfile: str = \"stdout.log\",\n ):\n \"\"\"Infers a tree with Cassiopeia-ILP.\n\n Solves a tree using the Cassiopeia-ILP algorithm and populates a tree\n in the provided CassiopeiaTree.\n\n Args:\n cassiopeia_tree: Input CassiopeiaTree\n layer: Layer storing the character matrix for solving. If None, the\n default character matrix is used in the CassiopeiaTree.\n collapse_mutationless_edges: Indicates if the final reconstructed\n tree should collapse mutationless edges based on internal states\n inferred by Camin-Sokal parsimony. In scoring accuracy, this\n removes artifacts caused by arbitrarily resolving polytomies.\n logfile: Location to log progress.\n \"\"\"\n\n if self.weighted and not cassiopeia_tree.priors:\n raise ILPSolverError(\n \"Specify prior probabilities in the CassiopeiaTree for weighted\"\n \" analysis.\"\n )\n\n # setup logfile config\n handler = logging.FileHandler(logfile)\n handler.setLevel(logging.INFO)\n logger.addHandler(handler)\n logger.info(\"Solving tree with the following parameters.\")\n logger.info(f\"Convergence time limit: {self.convergence_time_limit}\")\n logger.info(\n f\"Convergence iteration limit: {self.convergence_iteration_limit}\"\n )\n logger.info(\n f\"Max potential graph layer size: {self.maximum_potential_graph_layer_size}\"\n )\n logger.info(\n f\"Max potential graph lca distance: {self.maximum_potential_graph_lca_distance}\"\n )\n logger.info(f\"MIP gap: {self.mip_gap}\")\n\n if layer:\n character_matrix = cassiopeia_tree.layers[layer].copy()\n else:\n character_matrix = cassiopeia_tree.character_matrix.copy()\n if any(\n is_ambiguous_state(state)\n for state in character_matrix.values.flatten()\n ):\n raise ILPSolverError(\"Solver does not support ambiguous states.\")\n\n unique_character_matrix = character_matrix.drop_duplicates()\n\n weights = None\n if cassiopeia_tree.priors:\n weights = solver_utilities.transform_priors(\n cassiopeia_tree.priors, self.prior_transformation\n )\n\n # find the root of the tree & generate process ID\n root = tuple(\n data_utilities.get_lca_characters(\n unique_character_matrix.values.tolist(),\n cassiopeia_tree.missing_state_indicator,\n )\n )\n pid = hashlib.md5(\n \"|\".join([str(r) for r in root]).encode(\"utf-8\")\n ).hexdigest()\n\n targets = [tuple(t) for t in unique_character_matrix.values.tolist()]\n\n if 
unique_character_matrix.shape[0] == 1:\n optimal_solution = nx.DiGraph()\n optimal_solution.add_node(root)\n optimal_solution = (\n self.__append_sample_names_and_remove_spurious_leaves(\n optimal_solution, character_matrix\n )\n )\n cassiopeia_tree.populate_tree(optimal_solution, layer=layer)\n return\n\n # determine diameter of the dataset by evaluating maximum distance to\n # the root from each sample\n if (self.maximum_potential_graph_lca_distance is not None) and (\n self.maximum_potential_graph_lca_distance > 0\n ):\n max_lca_distance = self.maximum_potential_graph_lca_distance\n\n else:\n max_lca_distance = 0\n lca_distances = [\n dissimilarity_functions.hamming_distance(\n root,\n np.array(u),\n ignore_missing_state=True,\n missing_state_indicator=cassiopeia_tree.missing_state_indicator,\n )\n for u in targets\n ]\n\n for (i, j) in itertools.combinations(range(len(lca_distances)), 2):\n max_lca_distance = max(\n max_lca_distance, lca_distances[i] + lca_distances[j] + 1\n )\n\n # infer the potential graph\n potential_graph = self.infer_potential_graph(\n unique_character_matrix,\n pid,\n max_lca_distance,\n weights,\n cassiopeia_tree.missing_state_indicator,\n )\n\n # generate Steiner Tree ILP model\n nodes = list(potential_graph.nodes())\n encoder = dict(zip(nodes, list(range(len(nodes)))))\n decoder = dict((v, k) for k, v in encoder.items())\n\n _potential_graph = nx.relabel_nodes(potential_graph, encoder)\n _targets = list(map(lambda x: encoder[x], targets))\n _root = encoder[root]\n\n model, edge_variables = self.generate_steiner_model(\n _potential_graph, _root, _targets\n )\n\n # solve the ILP problem and return a set of proposed solutions\n proposed_solutions = self.solve_steiner_instance(\n model, edge_variables, _potential_graph, pid, logfile\n )\n\n # select best model and post process the solution\n optimal_solution = proposed_solutions[0]\n optimal_solution = nx.relabel_nodes(optimal_solution, decoder)\n\n optimal_solution = self.post_process_steiner_solution(\n optimal_solution, root\n )\n\n # append sample names to the solution and populate the tree\n optimal_solution = (\n self.__append_sample_names_and_remove_spurious_leaves(\n optimal_solution, character_matrix\n )\n )\n\n cassiopeia_tree.populate_tree(optimal_solution, layer=layer)\n\n # rename internal nodes such that they are not tuples\n node_name_generator = solver_utilities.node_name_generator()\n internal_node_rename = {}\n for i in cassiopeia_tree.internal_nodes:\n internal_node_rename[i] = next(node_name_generator)\n cassiopeia_tree.relabel_nodes(internal_node_rename)\n\n cassiopeia_tree.collapse_unifurcations()\n\n # collapse mutationless edges\n if collapse_mutationless_edges:\n cassiopeia_tree.collapse_mutationless_edges(\n infer_ancestral_characters=True\n )\n logger.removeHandler(handler)\n\n def infer_potential_graph(\n self,\n character_matrix: pd.DataFrame,\n pid: int,\n lca_height: int,\n weights: Optional[Dict[int, Dict[int, str]]] = None,\n missing_state_indicator: int = -1,\n ) -> nx.DiGraph:\n \"\"\"Infers a potential graph for the observed states.\n\n Using the set of samples in the character matrix for this solver,\n this procedure creates a network which contains potential ancestors, or\n evolutionary intermediates.\n\n This procedure invokes\n `ilp_solver_utilities.infer_potential_graph_cython` which returns the\n edges of the potential graph in character string format\n (e.g., \"1|2|3|...\"). 
The procedure here decodes these strings, creates\n a Networkx directed graph, and adds edges to the graph. These weights\n are added to the edges of the graph using priors, if they are specified\n in the CassiopeiaTree, or the number of mutations along an edge.\n\n Args:\n character_matrix: Character matrix\n root: Specified root node, represented as a list of character states\n pid: Process ID for future reference\n lca_height: Maximum lca height to consider for connecting nodes to\n an LCA\n weights: Weights for character-state pairs, derived from the priors\n if these are available.\n missing_state_indicator: Indicator for missing data.\n\n Returns:\n A potential graph represented by a directed graph.\n \"\"\"\n\n potential_graph_edges = (\n ilp_solver_utilities.infer_potential_graph_cython(\n character_matrix.values.astype(str),\n pid,\n lca_height,\n self.maximum_potential_graph_layer_size,\n missing_state_indicator,\n )\n )\n\n if len(potential_graph_edges) == 0:\n raise ILPSolverError(\"Potential Graph could not be found with\" \n \" solver parameters. Try increasing\"\n \" `maximum_potential_graph_layer_size` or\"\n \" using another solver.\")\n\n # the potential graph edges returned are strings in the form\n # \"state1|state2|...\", so we \"decode\" them here\n decoded_edges = []\n for e1, e2 in potential_graph_edges:\n e1 = np.array(\n e1.replace(\"-\", str(missing_state_indicator)).split(\"|\")\n ).astype(int)\n e2 = np.array(\n e2.replace(\"-\", str(missing_state_indicator)).split(\"|\")\n ).astype(int)\n decoded_edges.append((tuple(e1), tuple(e2)))\n\n potential_graph = nx.DiGraph()\n potential_graph.add_edges_from(decoded_edges)\n\n return self.add_edge_weights(\n potential_graph, weights, missing_state_indicator\n )\n\n def add_edge_weights(\n self,\n potential_graph: nx.DiGraph(),\n weights: Optional[Dict[int, Dict[int, str]]] = None,\n missing_state_indicator: int = -1,\n ) -> nx.DiGraph:\n \"\"\"Annotates edges with the weight.\n\n Given a graph where nodes are iterable entities containing character\n states, annotated edges with respect to the number of mutations. If a\n prior dictionary is passed into the constructor, the log likelihood\n of the mutations is added instead. 
These values are stored in the\n `weight` attribute of the networkx graph.\n\n Args:\n potential_graph: Potential graph\n weights: Weights to use when comparing states between characters\n missing_state_indicator: Variable to indicate missing state\n information.\n\n Returns:\n The potential graph with edge weights added, stored in the `weight`\n attribute.\n \"\"\"\n\n weighted_graph = potential_graph.copy()\n for u, v in weighted_graph.edges():\n weighted_graph[u][v][\n \"weight\"\n ] = dissimilarity_functions.weighted_hamming_distance(\n list(u), list(v), missing_state_indicator, weights\n )\n\n return weighted_graph\n\n def generate_steiner_model(\n self,\n potential_graph: nx.DiGraph,\n root: List[int],\n targets: List[List[int]],\n ):\n \"\"\"Generates a Gurobi instance for Steiner Tree inference.\n\n Given a potential graph, a root to treat as the source, and a list of\n targets, create a Gurobi mixed integer linear programming instance for\n computing the Steiner Tree.\n\n Args:\n potential_graph: Potential graph representing the evolutionary\n space on which to solve for the Steiner Tree.\n root: A node in the graph to treat as the source.\n targets: A list of nodes in the tree that serve as targets for the\n Steiner Tree procedure.\n\n Returns:\n A Gurobipy Model instance and the edge variables involved.\n \"\"\"\n try:\n import gurobipy\n except ModuleNotFoundError:\n raise ILPSolverError(\n \"Gurobi not found. You must install Gurobi & \"\n \"gurobipy from source.\"\n )\n\n source_flow = {v: 0 for v in potential_graph.nodes()}\n\n if root not in potential_graph.nodes:\n raise ILPSolverError(\"Root node not in potential graph.\")\n for t in targets:\n if t not in potential_graph.nodes:\n raise ILPSolverError(\"Target node not in potential graph.\")\n\n # remove source from targets if it exists there\n targets = [t for t in targets if t != root]\n\n source_flow[root] = len(targets)\n for target in targets:\n source_flow[target] = -1\n\n model = gurobipy.Model(\"steiner\")\n\n ############# add variables #############\n\n # add flow for edges\n edge_variables = {}\n for u, v in potential_graph.edges():\n edge_variables[u, v] = model.addVar(\n vtype=gurobipy.GRB.INTEGER,\n lb=0,\n ub=len(targets),\n name=f\"edge_{u}_{v}\",\n )\n\n # add edge-usage indicator variable\n edge_variables_binary = {}\n for u, v in potential_graph.edges():\n edge_variables_binary[u, v] = model.addVar(\n vtype=gurobipy.GRB.BINARY, name=f\"edge_binary_{u}_{v}\"\n )\n\n model.update()\n\n ############# add constraints #############\n\n # check if edge is used\n for u, v in potential_graph.edges():\n model.addConstr(\n edge_variables_binary[u, v]\n >= (edge_variables[u, v] / len(targets))\n )\n\n # flow conservation constraints\n for v in potential_graph.nodes():\n model.addConstr(\n (\n gurobipy.quicksum(\n edge_variables[u, v]\n for u in potential_graph.predecessors(v)\n )\n + source_flow[v]\n )\n == (\n gurobipy.quicksum(\n edge_variables[v, w]\n for w in potential_graph.successors(v)\n )\n )\n )\n\n ############ add objective #############\n\n objective_expression = gurobipy.quicksum(\n edge_variables_binary[u, v] * potential_graph[u][v][\"weight\"]\n for u, v in potential_graph.edges()\n )\n model.setObjective(objective_expression, gurobipy.GRB.MINIMIZE)\n\n return model, edge_variables\n\n def solve_steiner_instance(\n self,\n model,\n edge_variables,\n potential_graph: nx.DiGraph,\n pid: int,\n logfile: str,\n ) -> List[nx.DiGraph]:\n \"\"\"Solves for a Steiner Tree from the Gurobi instance.\n\n This 
function works with a model that has been specified via Gurobi,\n and will solve the model using the stopping criteria that the user\n has specified in this class instance.\n\n Args:\n model: A Gurobi model corresponding to the Steiner Tree problem.\n This should be created with `generate_steiner_model`.\n edge_variables: Edge variables that were created during model\n generation. These are Gurobi variables that indicate whether\n two nodes are connected to one another in the Potential Graph;\n we use these variables to recreate a tree at the end from the\n Gurobi solution.\n potential_graph: Potential Graph that was used as input to the\n Steiner Tree problem.\n pid: Process ID\n logfile: Location to store standard out.\n\n Returns:\n A list of solutions\n \"\"\"\n try:\n import gurobipy\n except ModuleNotFoundError:\n raise ILPSolverError(\n \"Gurobi not found. You must install Gurobi & \"\n \"gurobipy from source.\"\n )\n\n model.params.LogToConsole = 0\n\n # Adding constant parameters\n model.params.THREADS = 1\n model.params.Presolve = 2\n model.params.MIPFocus = 1\n model.params.Cuts = 1\n model.params.Method = 4\n\n # Add user-defined parameters\n model.params.MIPGAP = self.mip_gap\n model.params.LogFile = logfile\n\n if self.seed is not None:\n model.params.Seed = self.seed\n\n if self.convergence_iteration_limit > 0:\n model.params.IterationLimit = self.convergence_iteration_limit\n\n if self.convergence_time_limit > 0:\n model.params.TimeLimit = self.convergence_time_limit\n\n start_time = time.time()\n\n model.optimize()\n\n # recover subgraphs\n solutions = []\n for i in range(model.SolCount):\n model.params.SolutionNumber = i\n subgraph = nx.DiGraph()\n value_for_edge = model.getAttr(\"xn\", edge_variables)\n for u, v in potential_graph.edges():\n if value_for_edge[u, v] > 0:\n subgraph.add_edge(\n u, v, weight=potential_graph[u][v][\"weight\"]\n )\n solutions.append(subgraph)\n\n end_time = time.time()\n\n execution_delta = datetime.timedelta(seconds=(end_time - start_time))\n days = execution_delta.days\n hours = execution_delta.seconds // 3600\n minutes = execution_delta.seconds // 60\n seconds = execution_delta.seconds % 60\n\n logger.info(\n f\"(Process {pid}) Steiner tree solving tool {days} days, \"\n f\"{hours} hours, {minutes} minutes, and {seconds} seconds.\"\n )\n if model.status != gurobipy.GRB.status.OPTIMAL:\n logger.info(\n f\"(Process {pid}) Warning: Steiner tree solving did \"\n \"not result in an optimal model.\"\n )\n\n return solutions\n\n def post_process_steiner_solution(\n self,\n solution: nx.DiGraph,\n root: List[int],\n ) -> nx.DiGraph:\n \"\"\"Post-processes the returned graph from Gurobi.\n\n This procedure post-processes the proposed Steiner Tree from Gurobi\n by enforcing that no self-loops occur and that every node at most one\n parent.\n\n Args:\n solution: The Gurobi solution\n root: The root node\n targets: A list of targets\n pid: Process id\n\n Returns:\n A cleaned up networkx solution\n \"\"\"\n\n processed_solution = solution.copy()\n for edge in nx.selfloop_edges(processed_solution):\n processed_solution.remove_edge(edge[0], edge[1])\n\n # remove spurious roots\n spurious_roots = [\n n\n for n in processed_solution\n if processed_solution.in_degree(n) == 0\n ]\n while len(spurious_roots) > 1:\n for r in spurious_roots:\n if r != root:\n processed_solution.remove_node(r)\n spurious_roots = [\n n\n for n in processed_solution\n if processed_solution.in_degree(n) == 0\n ]\n\n # impose that each node only has one parent\n non_tree_nodes = [\n 
n\n for n in processed_solution.nodes()\n if processed_solution.in_degree(n) > 1\n ]\n for node in non_tree_nodes:\n parents = processed_solution.predecessors(node)\n parents = sorted(\n parents,\n key=lambda k: processed_solution[k][node][\"weight\"],\n reverse=True,\n )\n\n if len(parents) == 2 and (\n parents[1] in nx.ancestors(processed_solution, parents[0])\n or (parents[0] in nx.ancestors(processed_solution, parents[1]))\n ):\n if parents[1] in nx.ancestors(processed_solution, parents[0]):\n processed_solution.remove_edge(parents[1], node)\n else:\n processed_solution.remove_edge(parents[0], node)\n\n else:\n for parent in parents[1:]:\n processed_solution.remove_edge(parent, node)\n\n return processed_solution\n\n def __append_sample_names_and_remove_spurious_leaves(\n self, solution: nx.DiGraph, character_matrix: pd.DataFrame\n ) -> nx.DiGraph:\n \"\"\"Append samples to character states in tree and prune spurious leaves.\n\n Given a tree where every node corresponds to a set of character states,\n append sample names at the deepest node that has its character\n state. Sometimes character states can exist in two separate parts of\n the tree (especially when using the Hybrid algorithm where parts of\n the tree are built independently), so we make sure we only add a\n particular sample once to the tree. Additionally, if there exist\n extant nodes that do not have samples appended to them, these nodes are\n removed and their lineages pruned as to not create any spurious leaf\n nodes.\n\n Args:\n solution: A Steiner Tree solution that we wish to add sample\n names to.\n character_matrix: Character matrix\n\n Returns:\n A solution with extra leaves corresponding to sample names.\n \"\"\"\n\n root = [n for n in solution if solution.in_degree(n) == 0][0]\n\n sample_lookup = character_matrix.apply(\n lambda x: tuple(x.values), axis=1\n )\n\n states_added = []\n for node in nx.dfs_postorder_nodes(solution, source=root):\n\n # append nodes with this character state at the deepest place\n # possible\n if node in states_added:\n continue\n\n samples = sample_lookup[sample_lookup == node].index\n if len(samples) > 0:\n solution.add_edges_from([(node, sample) for sample in samples])\n states_added.append(node)\n\n # remove extant lineages that don't correspond to leaves\n leaves = [n for n in solution if solution.out_degree(n) == 0]\n for l in leaves:\n if l not in character_matrix.index:\n curr_parent = list(solution.predecessors(l))[0]\n solution.remove_node(l)\n while (\n len(list(solution.successors(curr_parent))) < 1\n and curr_parent != root\n ):\n next_parent = list(solution.predecessors(curr_parent))[0]\n solution.remove_node(curr_parent)\n curr_parent = next_parent\n\n return solution\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DomnenkoB/Hybrid-Search-Engine | [
"955e559f66723249d8810bb0ecbefd3de571dee0"
] | [
"hybrid_search_engine/index.py"
] | [
"from collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\nfrom nltk import word_tokenize\n\nfrom hybrid_search_engine import nlp_engine\nimport hybrid_search_engine.utils.text_processing as processing\n\n\ndef build_index_from_df(df: pd.DataFrame, columns, id_column, filtering_columns=[], min_token_len=1,\n lemmatize=True, remove_stopwords=True, lower=True):\n df = processing.process_df(df, text_columns=columns, lemmatize=lemmatize,\n remove_stopwords=remove_stopwords, lower=lower)\n\n postings, frequencies = build_postings(df, columns)\n\n index = convert_postings_to_df(postings, frequencies, columns)\n ids = df[id_column].values\n norms = calculate_norms(index, columns, n_docs=len(ids))\n document_tags = df[filtering_columns].copy(deep=True)\n\n documents_df = pd.concat([df[id_column], norms, document_tags], axis=1)\n\n return index, documents_df\n\n\ndef calculate_norms(index, columns, n_docs):\n norms = pd.DataFrame()\n\n for c in columns:\n document_token_num = defaultdict(int)\n document_idxs = index[c].values\n document_frequencies = index[f\"{c} TF\"].values\n\n for documents, frequencies in zip(document_idxs, document_frequencies):\n for d, f in zip(documents, frequencies):\n document_token_num[d] += f\n\n norms[f\"{c} Norm\"] = [1 / np.sqrt(document_token_num[i]) if document_token_num[i] > 0 else 0 for i in range(n_docs)]\n\n return norms\n\n\ndef build_postings(corpus, columns):\n postings = dict()\n frequencies = dict()\n\n for i, document in corpus.iterrows():\n for column in columns:\n if len(document[column]) > 0:\n unique_tokens = list(sorted(set(document[column])))\n for token in unique_tokens:\n if token in postings:\n if column in postings[token]:\n postings[token][column].append(i)\n frequencies[token][column].append(document[column].count(token))\n else:\n postings[token][column] = [i]\n frequencies[token][column] = [document[column].count(token)]\n else:\n postings[token] = {\n column: [i]\n }\n frequencies[token] = {\n column: [document[column].count(token)]\n }\n\n return postings, frequencies\n\n\ndef convert_postings_to_df(postings, frequencies, columns):\n postings_df = pd.DataFrame({\n \"token\": [k for k in postings.keys()],\n \"token vector\": [nlp_engine(k).vector for k in postings.keys()]\n })\n\n postings_df[\"token vector\"] = postings_df[\"token vector\"].apply(lambda v: v / np.linalg.norm(v))\n\n for column in columns:\n postings_df[column] = [np.array([]) for _ in range(len(postings.keys()))]\n postings_df[f\"{column} TF\"] = [np.array([]) for _ in range(len(postings.keys()))]\n\n for i, token in enumerate(postings.keys()):\n for column, doc_ids in postings[token].items():\n postings_df.loc[i, column] = np.array(doc_ids)\n postings_df.loc[i, f\"{column} TF\"] = np.array(frequencies[token][column])\n\n v_dim = nlp_engine(\"\").vector.shape[0]\n mask = np.sum(postings_df[\"token vector\"].values.tolist(), axis=1)\n postings_df.loc[pd.isna(mask), \"token vector\"] = [np.zeros(v_dim)]\n\n return postings_df\n"
] | [
[
"pandas.concat",
"numpy.sqrt",
"numpy.linalg.norm",
"pandas.DataFrame",
"pandas.isna",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
mhamaneamogh50/All_Python_Pro | [
"e943d1924f160a98e5df612b920a67ef80be3b61"
] | [
"matplotlib_1.py"
] | [
"import matplotlib.pyplot as plt\r\n#plt.plot([1,2,3,4],[4,8,6,1],'-o') #adding dot and line\r\n#plt.plot([5,6,7,8],'-go') #addind red and dot line\r\n#plt.plot([9,10,11,12],'-ro')\r\n#plt.title(\"Design by amogh\")\r\n#fig, ax = plt.subplots() # Create a figure containing a single axes.\r\n#ax.plot([1, 2, 3, 4], [1, 4, 2, 3]) # Plot some data on the axes.\r\n#plt.plot([1,2,3,44,5,])\r\n##x=[1,2,3,4,]\r\n##y=[9,7,8,11\r\n##plt.plot(x,y)\r\n##plt.xlabel(\"Roll\")\r\n##plt.ylabel(\"Mark\")\r\nx=[\"sci\",\"m1\",\"sst\"]\r\ny=[100,95,88]\r\nplt.plot(x,y,label=\"amogh\")\r\n\r\ny1=[13,55,89]\r\nplt.plot(x,y1,label=\"alok\")\r\n\r\ny2=[40,45,88]\r\nplt.plot(x,y2,label=\"vivek\")\r\n\r\nplt.legend()\r\n\r\nplt.show()\r\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Sandbergo/learn2branch | [
"a77ef5a22405484ca94181add6aa23297b30e9e7"
] | [
"11_analyze_data.py"
] | [
"\"\"\"\nAnalyze features of the generated problems.\n\"\"\"\nimport os\nimport sys\nimport importlib\nimport gzip\nimport argparse\nimport csv\nimport math\nimport numpy as np\nimport pandas as pd\nimport time\nimport pickle\nimport pathlib\nimport torch\n\n# import utilities\nfrom utilities_mlp import MLPDataset as Dataset\nfrom utilities_mlp import load_batch\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--problem',\n help='MILP instance type to process.',\n type=str,\n default='cauctions',\n choices=['setcover', 'cauctions', 'facilities', 'indset'],\n )\n parser.add_argument(\n '--data_path',\n help='name of the folder where train and valid folders are present. Assumes `data/samples` as default.',\n type=str,\n default=\"data/samples\",\n )\n args = parser.parse_args()\n problem_folders = {\n 'setcover': '500r_1000c_0.05d',\n 'cauctions': '100_500',\n 'facilities': '100_100_5',\n 'indset': '750_4',\n }\n problem_folder = problem_folders[args.problem]\n device = torch.device(\"cpu\")\n rng = np.random.RandomState(101)\n\n test_files = list(pathlib.Path(f\"{args.data_path}/{args.problem}/{problem_folder}/test\").glob('sample_node_135_57.pkl'))\n # test_files = list(pathlib.Path(f\"{args.data_path}/{args.problem}/{problem_folder}/test\").glob('sample_*0.pkl'))\n test_files = [str(x) for x in test_files]\n # print(len(test_files))\n\n with gzip.open(test_files[0], 'rb') as f:\n sample = pickle.load(f)\n\n obss, target, obss_feats, _ = sample['obss']\n #print(obss)\n \n v, _, _ = obss\n print(v.shape)\n sample_cand_scores = obss_feats['scores']\n sample_cands = np.where(sample_cand_scores != -1)[0]\n\n v_feats = v[sample_cands]\n # v_feats = utilities._preprocess(v_feats, mode='min-max-2')\n\n exit(0)\n\n chosen_test_files = rng.choice(test_files, 1, replace=True)\n\n test_data = Dataset(chosen_test_files)\n\n test_data = torch.utils.data.DataLoader(\n test_data, batch_size=1,\n shuffle=False, num_workers=0, collate_fn=load_batch)\n\n for batch in test_data:\n (cand_features, n_cands, best_cands, cand_scores, weights) = batch\n # cand_features, n_cands, best_cands, cand_scores, weights = map(lambda x:x.to(device), batch)\n print(cand_features.shape)\n # print(n_cands)\n # print(best_cands)\n # print(cand_scores[338])\n # print(weights)\n # torch.set_printoptions(profile=\"full\")\n # print(cand_features) # prints the whole tensor\n # exit(0)\n"
] | [
[
"torch.device",
"numpy.random.RandomState",
"torch.utils.data.DataLoader",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
calemen/permafrostanalytics | [
"31428bb7b4c0fc20ec06e1c472867542e506d8f5"
] | [
"ideas/eternal_sunshine/christian/train.py"
] | [
"#!/usr/bin/env python3\n# Copyright 2019 Christian Henning\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Train a resnet on images and time series data to predict below surface\ntemperatures.\n\nNote, this implementation does not represent a Bayesian Neural Network yet (not\nenough time).\n\nThe timeseries data is fed into the network via a fully-connected network that\nproduces the batch norm weights.\n\"\"\"\n\nfrom argparse import Namespace\nimport numpy as np\nimport random\nfrom time import time\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom regression_dataset import PermaRegressionDataset\nfrom resnet import ResNet\nfrom simple_bn_generator import BNGenerator\n\nif __name__ == \"__main__\":\n script_start = time()\n\n # FIXME\n ### User config\n args = Namespace()\n args.batch_size = 32\n args.lr = 0.01\n args.random_seed = 42\n args.local = False\n args.num_workers = 4\n\n ### Deterministic computation\n # Note, doesn't hold when using GPU or multiple workers that load the\n # dataset.\n torch.manual_seed(args.random_seed)\n torch.cuda.manual_seed_all(args.random_seed)\n np.random.seed(args.random_seed)\n random.seed(args.random_seed)\n\n ### Select device.\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n print(\"Using cuda: \" + str(use_cuda))\n\n ### Generate networks.\n # FIXME Downsample images to speed up computation.\n img_net = ResNet(\n in_shape=[536, 356, 3], num_outs=1, no_weights=False, use_batch_norm=True\n )\n\n # We include the following sensory information:\n # - surface temperature\n # - radiation\n # - month\n # - daytime\n n_in = 4\n\n # Shapes of batchnorm layer weights.\n # We will produce these weights with an auxiliary fully-connected network,\n # that takes the timeseries data as input.\n bn_shapes = []\n for l in img_net._batchnorm_layers:\n bn_shapes.extend(l.param_shapes)\n\n # FIXME Our current implementation doesn't allow efficient batch processing.\n # Neither the underlying hnet allows the usage of multiple task embeddings\n # nor does the batch norm layer support a batch of weights.\n ts_net = BNGenerator(bn_shapes, 1, layers=[100, 100], te_dim=n_in)\n\n ### Generate datasets.\n train_data = PermaRegressionDataset(\n args.local, time_slice={\"start_time\": \"2017-01-01\", \"end_time\": \"2017-06-30\"}\n )\n test_data = PermaRegressionDataset(\n args.local, time_slice={\"start_time\": \"2017-07-01\", \"end_time\": \"2017-07-31\"}\n )\n\n train_loader = DataLoader(\n train_data,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=args.num_workers,\n )\n test_loader = DataLoader(\n test_data,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.num_workers,\n )\n\n ### Train and test.\n raise NotImplementedError(\"Training and testing not implemented yet.\")\n\n print(\"Program finished successfully in %f sec.\" % (time() - script_start))\n"
] | [
[
"numpy.random.seed",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
db-bionlp/CLNER | [
"d8ddc6dbee17251622584b894dbb5765850b0add"
] | [
"flair/trainers/old_kd_trainer.py"
] | [
"from .trainer import *\nfrom flair.training_utils import store_teacher_predictions\nfrom flair.list_data import ListCorpus\nimport math\nimport random\nimport pdb\nimport copy\nfrom flair.datasets import CoupleDataset\nfrom ..custom_data_loader import ColumnDataLoader\nfrom torch.optim.adam import Adam\nimport torch.nn.functional as F\nimport traceback\nimport sys\nimport os\ndef get_corpus_lengths(train_data):\n return [len(corpus) for corpus in train_data]\n\ndef get_corpus_iterations(train_data, batch_size):\n corpus_lengths=get_corpus_lengths(train_data)\n return [math.ceil(corpus_length/float(batch_size)) for corpus_length in corpus_lengths]\n\ndef generate_training_order(train_data,batch_size,training_order=None):\n if training_order is None:\n corpus_iters=get_corpus_iterations(train_data,batch_size)\n training_order=[]\n for idx, iters in enumerate(corpus_iters):\n training_order=training_order+iters*[idx]\n random.shuffle(training_order)\n return training_order\n\n# Disable\ndef blockPrint():\n sys.stdout = open(os.devnull, 'w')\n\n# Restore\ndef enablePrint():\n sys.stdout = sys.__stdout__\n\n\n\n\nclass ModelDistiller(ModelTrainer):\n def __init__(\n self,\n student: flair.nn.Model,\n teachers: List[flair.nn.Model],\n corpus: ListCorpus,\n optimizer: torch.optim.Optimizer = SGD,\n professors: List[flair.nn.Model] = [],\n epoch: int = 0,\n optimizer_state: dict = None,\n scheduler_state: dict = None,\n use_tensorboard: bool = False,\n distill_mode: bool = True,\n ensemble_distill_mode: bool = False,\n config = None,\n train_with_professor: bool = False,\n is_test: bool = False,\n ):\n \"\"\"\n Initialize a model trainer\n :param model: The model that you want to train. The model should inherit from flair.nn.Model\n :param corpus: The dataset used to train the model, should be of type Corpus\n :param optimizer: The optimizer to use (typically SGD or Adam)\n :param epoch: The starting epoch (normally 0 but could be higher if you continue training model)\n :param optimizer_state: Optimizer state (necessary if continue training from checkpoint)\n :param scheduler_state: Scheduler state (necessary if continue training from checkpoint)\n :param use_tensorboard: If True, writes out tensorboard information\n \"\"\"\n # if teachers is not None:\n # assert len(teachers)==len(corpus.train_list), 'Training data and teachers should be the same length now!'\n self.model: flair.nn.Model = student\n self.corpus: ListCorpus = corpus\n self.distill_mode = distill_mode\n\n if self.distill_mode:\n self.corpus_teacher: ListCorpus = copy.deepcopy(corpus)\n # self.corpus_mixed_train: ListCorpus = [CoupleDataset(student_set,self.corpus_teacher.train_list[index]) for index,student_set in enumerate(self.corpus.train_list)]\n self.teachers: List[flair.nn.Model] = teachers\n self.professors: List[flair.nn.Model] = professors\n if self.teachers is not None:\n for teacher in self.teachers: teacher.eval()\n for professor in self.professors: professor.eval()\n # self.corpus = self.assign_pretrained_teacher_predictions(self.corpus,self.corpus_teacher,self.teachers)\n if self.model.biaf_attention and not is_test:\n # pdb.set_trace()\n pass\n self.model.init_biaf(self.teachers[0].hidden_size, num_teachers=len(self.teachers)+int(len(self.professors)>0))\n self.optimizer: torch.optim.Optimizer = optimizer\n if type(optimizer)==str:\n self.optimizer = getattr(torch.optim,optimizer)\n\n self.epoch: int = epoch\n self.scheduler_state: dict = scheduler_state\n self.optimizer_state: dict = optimizer_state\n 
self.use_tensorboard: bool = use_tensorboard\n \n self.config = config\n self.use_bert = False\n for embedding in self.config['embeddings']:\n if 'bert' in embedding.lower():\n self.use_bert=True\n self.ensemble_distill_mode: bool = ensemble_distill_mode\n self.train_with_professor: bool = train_with_professor\n # if self.train_with_professor:\n # assert len(self.professors) == len(self.corpus.train_list), 'Now only support same number of professors and corpus!'\n def train(\n self,\n base_path: Union[Path, str],\n learning_rate: float = 0.1,\n mini_batch_size: int = 32,\n eval_mini_batch_size: int = None,\n max_epochs: int = 100,\n anneal_factor: float = 0.5,\n patience: int = 3,\n min_learning_rate: float = 0.0001,\n train_with_dev: bool = False,\n monitor_train: bool = False,\n monitor_test: bool = False,\n embeddings_storage_mode: str = \"cpu\",\n checkpoint: bool = False,\n save_final_model: bool = True,\n anneal_with_restarts: bool = False,\n shuffle: bool = True,\n true_reshuffle: bool = False,\n param_selection_mode: bool = False,\n num_workers: int = 4,\n sampler=None,\n use_amp: bool = False,\n amp_opt_level: str = \"O1\",\n train_teacher: bool = False,\n professor_interpolation = 0.5,\n best_k = 10,\n gold_reward = False,\n **kwargs,\n ) -> dict:\n \"\"\"\n Trains any class that implements the flair.nn.Model interface.\n :param base_path: Main path to which all output during training is logged and models are saved\n :param learning_rate: Initial learning rate\n :param mini_batch_size: Size of mini-batches during training\n :param eval_mini_batch_size: Size of mini-batches during evaluation\n :param max_epochs: Maximum number of epochs to train. Terminates training if this number is surpassed.\n :param anneal_factor: The factor by which the learning rate is annealed\n :param patience: Patience is the number of epochs with no improvement the Trainer waits\n until annealing the learning rate\n :param min_learning_rate: If the learning rate falls below this threshold, training terminates\n :param train_with_dev: If True, training is performed using both train+dev data\n :param monitor_train: If True, training data is evaluated at end of each epoch\n :param monitor_test: If True, test data is evaluated at end of each epoch\n :param embeddings_storage_mode: One of 'none' (all embeddings are deleted and freshly recomputed),\n 'cpu' (embeddings are stored on CPU) or 'gpu' (embeddings are stored on GPU)\n :param checkpoint: If True, a full checkpoint is saved at end of each epoch\n :param save_final_model: If True, final model is saved\n :param anneal_with_restarts: If True, the last best model is restored when annealing the learning rate\n :param shuffle: If True, data is shuffled during training\n :param param_selection_mode: If True, testing is performed against dev data. Use this mode when doing\n parameter selection.\n :param num_workers: Number of workers in your data loader.\n :param sampler: You can pass a data sampler here for special sampling of data.\n :param kwargs: Other arguments for the Optimizer\n :return:\n \"\"\"\n min_learning_rate = learning_rate/1000\n self.gold_reward = gold_reward\n self.embeddings_storage_mode=embeddings_storage_mode\n self.mini_batch_size=mini_batch_size\n if self.use_tensorboard:\n try:\n from torch.utils.tensorboard import SummaryWriter\n\n writer = SummaryWriter()\n except:\n log_line(log)\n log.warning(\n \"ATTENTION! 
PyTorch >= 1.1.0 and pillow are required for TensorBoard support!\"\n )\n log_line(log)\n self.use_tensorboard = False\n pass\n\n if use_amp:\n if sys.version_info < (3, 0):\n raise RuntimeError(\"Apex currently only supports Python 3. Aborting.\")\n if amp is None:\n raise RuntimeError(\n \"Failed to import apex. Please install apex from https://www.github.com/nvidia/apex \"\n \"to enable mixed-precision training.\"\n )\n\n if eval_mini_batch_size is None:\n eval_mini_batch_size = mini_batch_size\n\n # cast string to Path\n if type(base_path) is str:\n base_path = Path(base_path)\n\n log_handler = add_file_handler(log, base_path / \"training.log\")\n\n log_line(log)\n log.info(f'Model: \"{self.model}\"')\n log_line(log)\n log.info(f'Corpus: \"{self.corpus}\"')\n log_line(log)\n log.info(\"Parameters:\")\n log.info(f' - learning_rate: \"{learning_rate}\"')\n log.info(f' - mini_batch_size: \"{mini_batch_size}\"')\n log.info(f' - patience: \"{patience}\"')\n log.info(f' - anneal_factor: \"{anneal_factor}\"')\n log.info(f' - max_epochs: \"{max_epochs}\"')\n log.info(f' - shuffle: \"{shuffle}\"')\n log.info(f' - train_with_dev: \"{train_with_dev}\"')\n log_line(log)\n log.info(f'Model training base path: \"{base_path}\"')\n log_line(log)\n log.info(f\"Device: {flair.device}\")\n log_line(log)\n log.info(f\"Embeddings storage mode: {embeddings_storage_mode}\")\n\n # determine what splits (train, dev, test) to evaluate and log\n if monitor_train:\n assert 0, 'monitor_train is not supported now!' \n # if train_with_dev:\n # assert 0, 'train_with_dev is not supported now!'\n\n log_train = True if monitor_train else False\n log_test = (\n True\n if (not param_selection_mode and self.corpus.test and monitor_test)\n else False\n )\n log_dev = True if not train_with_dev else False\n\n # prepare loss logging file and set up header\n loss_txt = init_output_file(base_path, \"loss.tsv\")\n\n weight_extractor = WeightExtractor(base_path)\n\n optimizer: torch.optim.Optimizer = self.optimizer(\n self.model.parameters(), lr=learning_rate, **kwargs\n )\n if self.optimizer_state is not None:\n optimizer.load_state_dict(self.optimizer_state)\n\n if use_amp:\n self.model, optimizer = amp.initialize(\n self.model, optimizer, opt_level=amp_opt_level\n )\n\n # minimize training loss if training with dev data, else maximize dev score\n anneal_mode = \"min\" if train_with_dev else \"max\"\n\n scheduler: ReduceLROnPlateau = ReduceLROnPlateau(\n optimizer,\n factor=anneal_factor,\n patience=patience,\n mode=anneal_mode,\n verbose=True,\n )\n\n if self.scheduler_state is not None:\n scheduler.load_state_dict(self.scheduler_state)\n\n # start from here, the train data is a list now\n train_data = self.corpus.train_list\n if self.distill_mode:\n train_data_teacher = self.corpus_teacher.train_list\n # train_data = self.corpus_mixed\n # if training also uses dev data, include in training set\n if train_with_dev:\n train_data = [ConcatDataset([train, self.corpus.dev_list[index]]) for index, train in enumerate(self.corpus.train_list)]\n if self.distill_mode:\n train_data_teacher = [ConcatDataset([train, self.corpus_teacher.dev_list[index]]) for index, train in enumerate(self.corpus_teacher.train_list)]\n # train_data = [ConcatDataset([train, self.corpus_mixed.dev_list[index]]) for index, train in self.corpus_mixed.train_list]\n # train_data_teacher = ConcatDataset([self.corpus_teacher.train, self.corpus_teacher.dev])\n # train_data = ConcatDataset([self.corpus_mixed.train, self.corpus_mixed.dev])\n if self.distill_mode:\n \n 
coupled_train_data = [CoupleDataset(data,train_data_teacher[index]) for index, data in enumerate(train_data)]\n if 'fast' in self.model.__class__.__name__.lower():\n faster=True\n else:\n faster=False\n\n if self.train_with_professor:\n log.info(f\"Predicting professor prediction\")\n coupled_train_data=self.assign_pretrained_teacher_predictions(coupled_train_data,self.professors,is_professor=True,faster=faster)\n # pdb.set_trace()\n for professor in self.professors:\n del professor\n del self.professors\n if self.model.distill_crf or self.model.distill_posterior:\n train_data=self.assign_pretrained_teacher_targets(coupled_train_data,self.teachers,best_k=best_k)\n else:\n train_data=self.assign_pretrained_teacher_predictions(coupled_train_data,self.teachers,faster=faster)\n # if self.ensemble_distill_mode:\n # log.info(f\"Ensembled distillation mode\")\n # coupled_train_data = ConcatDataset(coupled_train_data)\n # train_data=self.assign_ensembled_teacher_predictions(coupled_train_data,self.teachers)\n # # coupled_train_data = []\n # else:\n # train_data=self.assign_pretrained_teacher_predictions(coupled_train_data,self.teachers)\n # #train_data=ConcatDataset(train_data)\n for teacher in self.teachers:\n del teacher\n del self.teachers, self.corpus_teacher \n batch_loader=ColumnDataLoader(train_data,mini_batch_size,shuffle,use_bert=self.use_bert)\n else:\n batch_loader=ColumnDataLoader(ConcatDataset(train_data),mini_batch_size,shuffle,use_bert=self.use_bert)\n batch_loader.assign_tags(self.model.tag_type,self.model.tag_dictionary)\n if self.distill_mode:\n if faster:\n batch_loader=self.resort(batch_loader,is_crf=self.model.distill_crf, is_posterior = self.model.distill_posterior, is_token_att = self.model.token_level_attention)\n\n dev_loader=ColumnDataLoader(list(self.corpus.dev),eval_mini_batch_size,use_bert=self.use_bert)\n dev_loader.assign_tags(self.model.tag_type,self.model.tag_dictionary)\n test_loader=ColumnDataLoader(list(self.corpus.test),eval_mini_batch_size,use_bert=self.use_bert)\n test_loader.assign_tags(self.model.tag_type,self.model.tag_dictionary)\n # if self.distill_mode:\n # batch_loader.expand_teacher_predictions()\n # if sampler is not None:\n # sampler = sampler(train_data)\n # shuffle = False\n\n dev_score_history = []\n dev_loss_history = []\n train_loss_history = []\n\n # At any point you can hit Ctrl + C to break out of training early.\n try:\n previous_learning_rate = learning_rate\n training_order = None\n for epoch in range(0 + self.epoch, max_epochs + self.epoch):\n log_line(log)\n\n # get new learning rate\n for group in optimizer.param_groups:\n learning_rate = group[\"lr\"]\n\n # reload last best model if annealing with restarts is enabled\n if (\n learning_rate != previous_learning_rate\n and anneal_with_restarts\n and (base_path / \"best-model.pt\").exists()\n ):\n log.info(\"resetting to best model\")\n self.model.load(base_path / \"best-model.pt\")\n\n previous_learning_rate = learning_rate\n\n # stop training if learning rate becomes too small\n if learning_rate < min_learning_rate:\n log_line(log)\n log.info(\"learning rate too small - quitting training!\")\n log_line(log)\n break\n \n if shuffle:\n batch_loader.reshuffle()\n if true_reshuffle:\n # pdb.set_trace()\n batch_loader.true_reshuffle()\n if self.distill_mode:\n batch_loader=self.resort(batch_loader,is_crf=self.model.distill_crf, is_posterior = self.model.distill_posterior, is_token_att = self.model.token_level_attention)\n 
batch_loader.assign_tags(self.model.tag_type,self.model.tag_dictionary)\n self.model.train()\n # TODO: check teacher parameters fixed and with eval() mode\n\n train_loss: float = 0\n\n seen_batches = 0\n #total_number_of_batches = sum([len(loader) for loader in batch_loader])\n total_number_of_batches = len(batch_loader)\n\n modulo = max(1, int(total_number_of_batches / 10))\n\n # process mini-batches\n batch_time = 0\n if self.distill_mode:\n if self.teacher_annealing:\n interpolation=1-(epoch*self.anneal_factor)/100.0\n if interpolation<0:\n interpolation=0\n else:\n interpolation=self.interpolation\n log.info(\"Current loss interpolation: \"+ str(interpolation))\n total_sent=0\n for batch_no, student_input in enumerate(batch_loader):\n start_time = time.time()\n total_sent+=len(student_input)\n # pdb.set_trace()\n try:\n if self.distill_mode:\n loss = self.model.simple_forward_distillation_loss(student_input, interpolation = interpolation, train_with_professor=self.train_with_professor, professor_interpolation = professor_interpolation)\n else:\n loss = self.model.forward_loss(student_input)\n optimizer.zero_grad()\n # Backward\n if use_amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n except Exception:\n traceback.print_exc()\n pdb.set_trace()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5.0)\n optimizer.step()\n\n seen_batches += 1\n train_loss += loss.item()\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(student_input, embeddings_storage_mode)\n if self.distill_mode:\n store_teacher_predictions(student_input, embeddings_storage_mode)\n\n batch_time += time.time() - start_time\n if batch_no % modulo == 0:\n log.info(\n f\"epoch {epoch + 1} - iter {batch_no}/{total_number_of_batches} - loss \"\n f\"{train_loss / seen_batches:.8f} - samples/sec: {total_sent / batch_time:.2f}\"\n )\n total_sent=0\n batch_time = 0\n iteration = epoch * total_number_of_batches + batch_no\n if not param_selection_mode:\n weight_extractor.extract_weights(\n self.model.state_dict(), iteration\n )\n\n train_loss /= seen_batches\n\n self.model.eval()\n\n log_line(log)\n log.info(\n f\"EPOCH {epoch + 1} done: loss {train_loss:.4f} - lr {learning_rate:.4f}\"\n )\n\n if self.use_tensorboard:\n writer.add_scalar(\"train_loss\", train_loss, epoch + 1)\n\n # anneal against train loss if training with dev, otherwise anneal against dev score\n current_score = train_loss\n\n # evaluate on train / dev / test split depending on training settings\n result_line: str = \"\"\n\n if log_train:\n train_eval_result, train_loss = self.model.evaluate(\n batch_loader,\n embeddings_storage_mode=embeddings_storage_mode,\n )\n result_line += f\"\\t{train_eval_result.log_line}\"\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(self.corpus.train, embeddings_storage_mode)\n\n if log_dev:\n dev_eval_result, dev_loss = self.model.evaluate(\n dev_loader,\n embeddings_storage_mode=embeddings_storage_mode,\n )\n result_line += f\"\\t{dev_loss}\\t{dev_eval_result.log_line}\"\n log.info(\n f\"DEV : loss {dev_loss} - score {dev_eval_result.main_score}\"\n )\n # calculate scores using dev data if available\n # append dev score to score history\n dev_score_history.append(dev_eval_result.main_score)\n dev_loss_history.append(dev_loss)\n\n current_score = dev_eval_result.main_score\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n 
store_embeddings(self.corpus.dev, embeddings_storage_mode)\n\n if self.use_tensorboard:\n writer.add_scalar(\"dev_loss\", dev_loss, epoch + 1)\n writer.add_scalar(\n \"dev_score\", dev_eval_result.main_score, epoch + 1\n )\n\n if log_test:\n test_eval_result, test_loss = self.model.evaluate(\n test_loader,\n base_path / \"test.tsv\",\n embeddings_storage_mode=embeddings_storage_mode,\n )\n result_line += f\"\\t{test_loss}\\t{test_eval_result.log_line}\"\n log.info(\n f\"TEST : loss {test_loss} - score {test_eval_result.main_score}\"\n )\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(self.corpus.test, embeddings_storage_mode)\n\n if self.use_tensorboard:\n writer.add_scalar(\"test_loss\", test_loss, epoch + 1)\n writer.add_scalar(\n \"test_score\", test_eval_result.main_score, epoch + 1\n )\n log.info(test_eval_result.log_line)\n log.info(test_eval_result.detailed_results)\n if type(self.corpus) is MultiCorpus:\n for subcorpus in self.corpus.corpora:\n log_line(log)\n log.info('current corpus: '+subcorpus.name)\n current_result, test_loss = self.model.evaluate(\n ColumnDataLoader(list(subcorpus.test),eval_mini_batch_size,use_bert=self.use_bert),\n out_path=base_path / f\"{subcorpus.name}-test.tsv\",\n embeddings_storage_mode=embeddings_storage_mode,\n )\n log.info(current_result.log_line)\n log.info(current_result.detailed_results)\n elif type(self.corpus) is ListCorpus:\n for index,subcorpus in enumerate(self.corpus.test_list):\n log_line(log)\n log.info('current corpus: '+self.corpus.targets[index])\n current_result, test_loss = self.model.evaluate(\n ColumnDataLoader(list(subcorpus),eval_mini_batch_size,use_bert=self.use_bert),\n out_path=base_path / f\"{self.corpus.targets[index]}-test.tsv\",\n embeddings_storage_mode=embeddings_storage_mode,\n )\n log.info(current_result.log_line)\n log.info(current_result.detailed_results)\n\n\n # determine learning rate annealing through scheduler\n scheduler.step(current_score)\n\n train_loss_history.append(train_loss)\n\n # determine bad epoch number\n try:\n bad_epochs = scheduler.num_bad_epochs\n except:\n bad_epochs = 0\n for group in optimizer.param_groups:\n new_learning_rate = group[\"lr\"]\n if new_learning_rate != previous_learning_rate:\n bad_epochs = patience + 1\n\n # log bad epochs\n log.info(f\"BAD EPOCHS (no improvement): {bad_epochs}\")\n\n # output log file\n with open(loss_txt, \"a\") as f:\n\n # make headers on first epoch\n if epoch == 0:\n f.write(\n f\"EPOCH\\tTIMESTAMP\\tBAD_EPOCHS\\tLEARNING_RATE\\tTRAIN_LOSS\"\n )\n\n if log_train:\n f.write(\n \"\\tTRAIN_\"\n + \"\\tTRAIN_\".join(\n train_eval_result.log_header.split(\"\\t\")\n )\n )\n if log_dev:\n f.write(\n \"\\tDEV_LOSS\\tDEV_\"\n + \"\\tDEV_\".join(dev_eval_result.log_header.split(\"\\t\"))\n )\n if log_test:\n f.write(\n \"\\tTEST_LOSS\\tTEST_\"\n + \"\\tTEST_\".join(\n test_eval_result.log_header.split(\"\\t\")\n )\n )\n\n f.write(\n f\"\\n{epoch}\\t{datetime.datetime.now():%H:%M:%S}\\t{bad_epochs}\\t{learning_rate:.4f}\\t{train_loss}\"\n )\n f.write(result_line)\n\n # if checkpoint is enable, save model at each epoch\n if checkpoint and not param_selection_mode:\n self.model.save_checkpoint(\n base_path / \"checkpoint.pt\",\n optimizer.state_dict(),\n scheduler.state_dict(),\n epoch + 1,\n train_loss,\n )\n\n # if we use dev data, remember best model based on dev evaluation score\n if (\n not train_with_dev\n and not param_selection_mode\n and current_score == scheduler.best\n ):\n self.model.save(base_path / 
\"best-model.pt\")\n\n # if we do not use dev data for model selection, save final model\n if save_final_model and not param_selection_mode:\n self.model.save(base_path / \"final-model.pt\")\n\n except KeyboardInterrupt:\n log_line(log)\n log.info(\"Exiting from training early.\")\n\n if self.use_tensorboard:\n writer.close()\n\n if not param_selection_mode:\n log.info(\"Saving model ...\")\n self.model.save(base_path / \"final-model.pt\")\n log.info(\"Done.\")\n\n # test best model if test data is present\n if self.corpus.test:\n final_score = self.final_test(base_path, eval_mini_batch_size, num_workers)\n else:\n final_score = 0\n log.info(\"Test data not provided setting final score to 0\")\n\n log.removeHandler(log_handler)\n\n if self.use_tensorboard:\n writer.close()\n\n return {\n \"test_score\": final_score,\n \"dev_score_history\": dev_score_history,\n \"train_loss_history\": train_loss_history,\n \"dev_loss_history\": dev_loss_history,\n }\n @property\n def interpolation(self):\n try:\n return self.config['interpolation']\n except:\n return 0.5\n @property\n def teacher_annealing(self):\n try:\n return self.config['teacher_annealing']\n except:\n return False\n @property\n def anneal_factor(self):\n try:\n return self.config['anneal_factor']\n except:\n return 2\n def assign_pretrained_teacher_predictions(self,coupled_train_data,teachers,is_professor=False,faster=False):\n if not is_professor:\n log.info('Distilling sentences...')\n else:\n log.info('Distilling professor sentences...')\n assert len(self.corpus.targets) == len(coupled_train_data), 'Coupled train data is not equal to target!'\n counter=0\n res_input=[]\n use_bert=False\n for teacher in teachers:\n if self.model.biaf_attention:\n teacher.biaf_attention=True\n if self.model.token_level_attention:\n teacher.token_level_attention=True\n if teacher.use_bert:\n use_bert=True\n # break\n for index, train_data in enumerate(coupled_train_data):\n target = self.corpus.targets[index]\n loader=ColumnDataLoader(list(train_data),self.mini_batch_size,grouped_data=True,use_bert=use_bert)\n for batch in loader:\n counter+=len(batch)\n student_input, teacher_input = zip(*batch)\n student_input=list(student_input)\n teacher_input=list(teacher_input)\n lengths1 = torch.Tensor([len(sentence.tokens) for sentence in teacher_input])\n lengths2 = torch.Tensor([len(sentence.tokens) for sentence in student_input])\n assert (lengths1==lengths2).all(), 'two batches are not equal!'\n max_len = max(lengths1)\n mask=self.model.sequence_mask(lengths1, max_len).unsqueeze(-1).cuda().float()\n for teacher in teachers:\n if target not in teacher.targets:\n continue\n with torch.no_grad():\n logits=teacher.forward(teacher_input)\n if self.model.distill_prob:\n # pdb.set_trace()\n logits=F.softmax(logits,-1)\n for idx, sentence in enumerate(student_input):\n # if hasattr(sentence,'_teacher_target'):\n # assert 0, 'The sentence has been filled with teacher target!'\n if self.model.biaf_attention:\n try:\n sentence.set_teacher_sentfeats(teacher.sent_feats[idx],self.embeddings_storage_mode)\n except:\n pdb.set_trace()\n if not faster:\n sentence.set_teacher_prediction(logits[idx][:len(sentence)], self.embeddings_storage_mode)\n else:\n sentence.set_teacher_prediction(logits[idx]*mask[idx], self.embeddings_storage_mode)\n teacher_input[idx].clear_embeddings()\n del logits\n # del teacher.sent_feats[idx]\n res_input+=student_input\n # store_embeddings(teacher_input, \"none\")\n \n # del teacher\n\n if is_professor:\n log.info('Distilled '+str(counter)+' 
professor sentences')\n return coupled_train_data\n else:\n log.info('Distilled '+str(counter)+' sentences')\n return res_input\n\n def assign_pretrained_teacher_targets(self,coupled_train_data,teachers,best_k=10):\n log.info('Distilling sentences as targets...')\n assert len(self.corpus.targets) == len(coupled_train_data), 'Coupled train data is not equal to target!'\n counter=0\n res_input=[]\n use_bert=False\n for teacher in teachers:\n if teacher.use_bert:\n use_bert=True\n for index, train_data in enumerate(coupled_train_data):\n target = self.corpus.targets[index]\n loader=ColumnDataLoader(list(train_data),self.mini_batch_size,grouped_data=True,use_bert=use_bert)\n for batch in loader:\n counter+=len(batch)\n student_input, teacher_input = zip(*batch)\n student_input=list(student_input)\n teacher_input=list(teacher_input)\n lengths1 = torch.Tensor([len(sentence.tokens) for sentence in teacher_input])\n lengths2 = torch.Tensor([len(sentence.tokens) for sentence in student_input])\n assert (lengths1==lengths2).all(), 'two batches are not equal!'\n # pdb.set_trace()\n\n max_len = max(lengths1)\n mask=self.model.sequence_mask(lengths1, max_len).unsqueeze(-1).cuda().long()\n lengths1=lengths1.long()\n for teacher in teachers:\n if target not in teacher.targets:\n continue\n with torch.no_grad():\n logits=teacher.forward(teacher_input)\n if self.model.distill_crf:\n if self.gold_reward:\n for s_id, sentence in enumerate(batch):\n # get the tags in this sentence\n tag_idx: List[int] = [\n tag_dictionary.get_idx_for_item(token.get_tag(tag_type).value)\n for token in sentence\n ]\n # add tags as tensor\n tag_template = torch.zeros(max_len,device='cpu')\n tag = torch.tensor(tag_idx, device='cpu')\n tag_template[:len(sentence)]=tag\n path_score, decode_idx=self.model._viterbi_decode_nbest(logits,mask,best_k)\n if self.model.distill_posterior:\n # pdb.set_trace()\n forward_var = self.model._forward_alg(logits, lengths1, distill_mode=True)\n backward_var = self.model._backward_alg(logits, lengths1)\n forward_backward_score = (forward_var + backward_var) * mask.float()\n # pdb.set_trace()\n for idx, sentence in enumerate(student_input):\n # if hasattr(sentence,'_teacher_target'):\n # assert 0, 'The sentence has been filled with teacher target!'\n if self.model.distill_crf:\n if self.model.crf_attention:\n # pdb.set_trace()\n sentence.set_teacher_weights(path_score[idx], self.embeddings_storage_mode) \n sentence.set_teacher_target(decode_idx[idx]*mask[idx], self.embeddings_storage_mode)\n if self.model.distill_posterior:\n sentence.set_teacher_posteriors(forward_backward_score[idx], self.embeddings_storage_mode)\n teacher_input[idx].clear_embeddings()\n del logits\n res_input+=student_input\n # store_embeddings(teacher_input, \"none\")\n \n # del teacher\n\n log.info('Distilled '+str(counter)+' sentences')\n return res_input\n def resort(self,loader,is_crf=False, is_posterior=False, is_token_att=False):\n for batch in loader.data:\n if is_posterior:\n posteriors=[x._teacher_posteriors for x in batch]\n posterior_lens=[len(x[0]) for x in posteriors]\n lens=posterior_lens.copy()\n targets=posteriors.copy()\n if is_token_att:\n sentfeats=[x._teacher_sentfeats for x in batch]\n sentfeats_lens=[len(x[0]) for x in sentfeats]\n # lens=sentfeats_lens.copy()\n # targets=sentfeats.copy()\n if is_crf:\n targets=[x._teacher_target for x in batch]\n lens=[len(x[0]) for x in targets]\n # pdb.set_trace()\n if not is_crf and not is_posterior:\n targets=[x._teacher_prediction for x in batch]\n lens=[len(x[0]) for x 
in targets]\n sent_lens=[len(x) for x in batch]\n if is_posterior:\n # pdb.set_trace()\n assert posterior_lens==lens, 'lengths of two targets not match'\n if max(lens)>min(lens) or max(sent_lens)!=max(lens):\n # if max(sent_lens)!=max(lens):\n # pdb.set_trace()\n max_shape=max(sent_lens)\n for index, target in enumerate(targets):\n new_targets=[]\n new_posteriors=[]\n new_sentfeats=[]\n if is_posterior:\n post_vals=posteriors[index]\n if is_token_att:\n sentfeats_vals=sentfeats[index]\n for idx, val in enumerate(target):\n # pdb.set_trace()\n if is_crf or (not is_crf and not is_posterior):\n shape=[max_shape]+list(val.shape[1:])\n new_target=torch.zeros(shape).type_as(val)\n new_target[:sent_lens[index]]=val[:sent_lens[index]]\n new_targets.append(new_target)\n if is_token_att:\n sentfeats_val=sentfeats_vals[idx]\n shape=[max_shape]+list(sentfeats_val.shape[1:])\n new_sentfeat=torch.zeros(shape).type_as(sentfeats_val)\n new_sentfeat[:sent_lens[index]]=sentfeats_val[:sent_lens[index]]\n new_sentfeats.append(new_sentfeat)\n # pdb.set_trace()\n if is_posterior:\n # pdb.set_trace() \n post_val=post_vals[idx]\n shape=[max_shape]+list(post_val.shape[1:])\n new_posterior=torch.zeros(shape).type_as(post_val)\n new_posterior[:sent_lens[index]]=post_val[:sent_lens[index]]\n new_posteriors.append(new_posterior)\n \n if is_crf:\n batch[index]._teacher_target=new_targets\n if is_posterior:\n batch[index]._teacher_posteriors=new_posteriors\n if is_token_att:\n batch[index]._teacher_sentfeats=new_sentfeats\n if not is_crf and not is_posterior:\n batch[index]._teacher_prediction=new_targets\n\n return loader\n\n def final_test(\n self, base_path: Path, eval_mini_batch_size: int, num_workers: int = 8, overall_test: bool = True, quiet_mode: bool = False, nocrf: bool = False\n ):\n\n log_line(log)\n \n\n self.model.eval()\n if quiet_mode:\n #blockPrint()\n log.disabled=True\n if (base_path / \"best-model.pt\").exists():\n self.model = self.model.load(base_path / \"best-model.pt\")\n log.info(\"Testing using best model ...\")\n elif (base_path / \"final-model.pt\").exists():\n self.model = self.model.load(base_path / \"final-model.pt\")\n log.info(\"Testing using final model ...\")\n if nocrf:\n self.model.use_crf=False\n if overall_test:\n loader=ColumnDataLoader(list(self.corpus.test),eval_mini_batch_size, use_bert=self.use_bert)\n loader.assign_tags(self.model.tag_type,self.model.tag_dictionary)\n test_results, test_loss = self.model.evaluate(\n loader,\n out_path=base_path / \"test.tsv\",\n embeddings_storage_mode=\"none\",\n )\n test_results: Result = test_results\n log.info(test_results.log_line)\n log.info(test_results.detailed_results)\n log_line(log)\n if quiet_mode:\n enablePrint()\n print('Average', end=' ')\n print(test_results.main_score, end=' ')\n # if we are training over multiple datasets, do evaluation for each\n if type(self.corpus) is MultiCorpus:\n for subcorpus in self.corpus.corpora:\n log_line(log)\n log.info('current corpus: '+subcorpus.name)\n loader=ColumnDataLoader(list(subcorpus.test),eval_mini_batch_size,use_bert=self.use_bert)\n loader.assign_tags(self.model.tag_type,self.model.tag_dictionary)\n current_result, test_loss = self.model.evaluate(\n loader,\n out_path=base_path / f\"{subcorpus.name}-test.tsv\",\n embeddings_storage_mode=\"none\",\n )\n log.info(current_result.log_line)\n log.info(current_result.detailed_results)\n if quiet_mode:\n print(subcorpus.name,end=' ')\n print(current_result.main_score,end=' ')\n\n elif type(self.corpus) is ListCorpus:\n for index,subcorpus 
in enumerate(self.corpus.test_list):\n log_line(log)\n log.info('current corpus: '+self.corpus.targets[index])\n loader=ColumnDataLoader(list(subcorpus),eval_mini_batch_size,use_bert=self.use_bert)\n loader.assign_tags(self.model.tag_type,self.model.tag_dictionary)\n current_result, test_loss = self.model.evaluate(\n loader,\n out_path=base_path / f\"{self.corpus.targets[index]}-test.tsv\",\n embeddings_storage_mode=\"none\",\n )\n log.info(current_result.log_line)\n log.info(current_result.detailed_results)\n if quiet_mode:\n print(self.corpus.targets[index],end=' ')\n print(current_result.main_score,end=' ')\n print()\n if overall_test:\n # get and return the final test score of best model\n final_score = test_results.main_score\n\n return final_score\n return 0\n\n"
] | [
[
"torch.nn.functional.softmax",
"torch.utils.tensorboard.SummaryWriter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
humlab-unesco/unesco_data_collection | [
"7c49b8c61e52007f507a426582e164c82c7fe67d"
] | [
"tests/courier/overlap_check_test.py"
] | [
"from io import StringIO\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\nimport pandas as pd\nimport pytest\n\nfrom courier.config import get_config\nfrom courier.overlap_check import (\n get_articles_with_overlap_of_two_or_more_other_articles,\n get_overlapping_pages,\n save_overlapping_pages,\n)\n\nCONFIG = get_config()\n\n\ndef test_get_overlapping_pages_return_expected():\n overlapping_pages = get_overlapping_pages(CONFIG.article_index)\n expected = pd.read_csv(CONFIG.overlap_file, sep='\\t')\n assert overlapping_pages.equals(expected)\n assert overlapping_pages.shape == (1246, 3)\n assert set(overlapping_pages.columns) == set(['courier_id', 'page', 'count'])\n\n\ndef test_save_overlapping_pages():\n with TemporaryDirectory() as output_dir:\n overlapping_pages = get_overlapping_pages(CONFIG.article_index)\n save_overlapping_pages(overlapping_pages, (Path(output_dir) / 'overlap.csv'))\n assert (Path(output_dir) / 'overlap.csv').exists()\n\n\[email protected]\ndef article_index():\n statistics = StringIO(\n \"\"\"courier_id;year;record_number;pages;catalogue_title\n111111;2020;111;[1];a1\n111111;2020;111;[1];a2\n111111;2020;111;[1];a3\n222222;2020;222;[1];b1\n222222;2020;222;[1];b2\n\"\"\"\n )\n return pd.read_csv(statistics, sep=';')\n\n\ndef test_get_articles_with_overlap_of_two_or_more_other_articles(article_index): # pylint: disable=redefined-outer-name\n df = get_articles_with_overlap_of_two_or_more_other_articles(article_index)\n assert df.shape == (1, 3)\n assert df.iloc[0].record_number == 111\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
CrepeGoat/FEHnt | [
"4d728c3022ce320a374edfbdc5d23b4482b62e21"
] | [
"static_frame/core/index_level.py"
] | [
"\n\nimport typing as tp\nfrom collections import deque\n\nimport numpy as np\n\nfrom static_frame.core.hloc import HLoc\nfrom static_frame.core.index import Index\nfrom static_frame.core.index import ILoc\nfrom static_frame.core.index import IndexGO\nfrom static_frame.core.array_go import ArrayGO\n\nfrom static_frame.core.util import KEY_MULTIPLE_TYPES\nfrom static_frame.core.util import KEY_ITERABLE_TYPES\nfrom static_frame.core.util import INT_TYPES\nfrom static_frame.core.util import GetItemKeyType\nfrom static_frame.core.index import LocMap\nfrom static_frame.core.util import resolve_dtype_iter\n\n\n\n\nclass IndexLevel:\n '''\n A nestable representation of an Index, where labels in that index optionally point to other Index objects.\n '''\n __slots__ = (\n 'index',\n 'targets',\n 'offset'\n )\n\n def __init__(self,\n index: Index,\n targets: tp.Optional[ArrayGO] = None, # np.ndarray[IndexLevel]\n offset: int = 0\n ):\n '''\n Args:\n offset: integer offset for this level.\n targets: np.ndarray of Indices; np.array supports fancy indexing for iloc compatible usage.\n '''\n self.index = index\n self.targets = targets\n self.offset = offset\n\n def to_index_level(self,\n offset: tp.Optional[int] = 0,\n cls: tp.Type['IndexLevel'] = None,\n ) -> 'IndexLevel':\n '''\n A deepcopy with optional adjustments, such as a different offset and possibly a different class.\n\n Args:\n offset: optionally provide a new offset for the copy. This is not applied recursively\n '''\n index = self.index.copy()\n\n if self.targets is not None:\n targets = ArrayGO(\n [t.to_index_level(offset=None, cls=cls) for t in self.targets],\n own_iterable=True)\n else:\n targets = None\n\n offset = self.offset if offset is None else offset\n cls = cls if cls else self.__class__\n return cls(index=index, targets=targets, offset=offset)\n\n def __len__(self):\n '''\n The length is the sum of all leaves\n '''\n if self.targets is None:\n return self.index.__len__()\n\n count = 0\n levels = [self]\n while levels:\n level = levels.pop()\n if level.targets is None: # terminus\n count += level.index.__len__()\n else:\n levels.extend(level.targets)\n return count\n\n def depths(self) -> tp.Generator[int, None, None]:\n # NOTE: as this uses a list instead of deque, the depths given will not be in the order of the actual leaves\n if self.targets is None:\n yield 1\n else:\n levels = [(self, 0)]\n while levels:\n level, depth = levels.pop()\n if level.targets is None: # terminus\n yield depth + 1\n else:\n next_depth = depth + 1\n levels.extend([(lvl, next_depth) for lvl in level.targets])\n\n def dtypes(self) -> tp.Generator[int, None, None]:\n # NOTE: as this uses a list instead of deque, the depths given will not be in the order of the actual leaves\n if self.targets is None:\n yield self.index.values.dtype\n else:\n levels = [self]\n while levels:\n level = levels.pop()\n # use pulbic interface, as this might be an IndexGO\n yield level.index.values.dtype\n if level.targets is not None: # not terminus\n levels.extend(level.targets)\n\n def __contains__(self, key: tp.Iterable[tp.Hashable]) -> bool:\n '''Given an iterable of single-element level keys (a leaf loc), return a bool.\n '''\n node = self\n for k in key:\n if not node.index.__contains__(k):\n return False\n\n if node.targets is not None:\n node = node.targets[node.index.loc_to_iloc(k)]\n continue\n else: # targets is None, meaning we are done\n node.index.loc_to_iloc(k)\n return True # if above does not raise\n\n def iter(self, depth_level: int) -> 
tp.Generator[tp.Hashable, None, None]:\n '''Given a depth position, return labels at that depth.\n '''\n if depth_level == 0:\n yield from self.index\n else:\n levels = deque(((self, 0),))\n while levels:\n level, depth = levels.popleft()\n if depth == depth_level:\n yield from level.index\n continue # do not need to descend\n if level.targets is not None: # terminus\n next_depth = depth + 1\n levels.extend([(lvl, next_depth) for lvl in level.targets])\n\n\n def leaf_loc_to_iloc(self, key: tp.Iterable[tp.Hashable]) -> int:\n '''Given an iterable of single-element level keys (a leaf loc), return the iloc value.\n '''\n if isinstance(key, ILoc):\n return key.key\n\n node = self\n pos = 0\n for k in key:\n if isinstance(k, KEY_MULTIPLE_TYPES):\n raise RuntimeError('slices cannot be used in a leaf selection into an IndexHierarchy; try HLoc[{}].'.format(key))\n if node.targets is not None:\n node = node.targets[node.index.loc_to_iloc(k)]\n pos += node.offset\n else: # targets is None, meaning we are done\n # assume that k returns an integer\n return pos + node.index.loc_to_iloc(k)\n\n\n def loc_to_iloc(self, key: GetItemKeyType) -> GetItemKeyType:\n '''\n This is the low-level loc_to_iloc, analagous to LocMap.loc_to_iloc as used by Index. As such, the key at this point should not be a Series or Index object.\n '''\n if isinstance(key, slice):\n # given a top-level definition of a slice (and if that slice results in a single value), we can get a value range\n return slice(*LocMap.map_slice_args(self.leaf_loc_to_iloc, key))\n\n # this should not match tuples that are leaf-locs\n if isinstance(key, KEY_ITERABLE_TYPES):\n if isinstance(key, np.ndarray) and key.dtype == bool:\n return key # keep as Boolean?\n return [self.leaf_loc_to_iloc(x) for x in key]\n\n # elif isinstance(key, IndexHierarchy):\n # # values will give an iterable if rows, where rows are iloc selectors\n # return [self.leaf_loc_to_iloc(tuple(x)) for x in key.values]\n\n if not isinstance(key, HLoc):\n # assume it is a leaf loc tuple\n return self.leaf_loc_to_iloc(key)\n\n # everything after this is an HLoc\n\n # collect all ilocs for all leaf indices matching HLoc patterns\n ilocs = []\n levels = deque(((self, 0, 0),)) # order matters\n\n while levels:\n level, depth, offset = levels.popleft()\n depth_key = key[depth]\n next_offset = offset + level.offset\n\n # print(level, depth, offset, depth_key, next_offset)\n # import ipdb; ipdb.set_trace()\n\n if level.targets is None:\n try:\n ilocs.append(level.index.loc_to_iloc(depth_key, offset=next_offset))\n except KeyError:\n pass\n else: # target is iterable np.ndaarray\n try:\n iloc = level.index.loc_to_iloc(depth_key) # no offset\n except KeyError:\n pass\n else:\n level_targets = level.targets[iloc] # get one or more IndexLevel objects\n next_depth = depth + 1\n # if not an ndarray, iloc has extracted a single IndexLevel\n if isinstance(level_targets, IndexLevel):\n levels.append((level_targets, next_depth, next_offset))\n else:\n levels.extend([(lvl, next_depth, next_offset)\n for lvl in level_targets])\n\n iloc_count = len(ilocs)\n if iloc_count == 0:\n raise KeyError('no matching keys across all levels')\n\n if iloc_count == 1 and not key.has_key_multiple():\n # drop to a single iloc selection\n return ilocs[0]\n\n # NOTE: might be able to combine contiguous ilocs into a single slice\n iloc = [] # combine into one flat iloc\n length = self.__len__()\n for part in ilocs:\n if isinstance(part, slice):\n iloc.extend(range(*part.indices(length)))\n # just look for ints\n elif 
isinstance(part, INT_TYPES):\n iloc.append(part)\n else: # assume it is an iterable\n iloc.extend(part)\n return iloc\n\n def get_labels(self) -> np.ndarray:\n '''\n Return an immutable NumPy 2D array of all labels found in this IndexLevels instance.\n '''\n # assume uniform depths\n depth_count = next(self.depths())\n shape = self.__len__(), depth_count\n dtype = resolve_dtype_iter(self.dtypes())\n labels = np.empty(shape, dtype=dtype)\n row_count = 0\n\n levels = deque(((self, 0, None),)) # order matters\n\n while levels:\n level, depth, row_previous = levels.popleft()\n\n if level.targets is None:\n rows = len(level.index.values)\n row_slice = slice(row_count, row_count + rows)\n labels[row_slice, :] = row_previous\n labels[row_slice, depth] = level.index.values\n row_count += rows\n\n else: # target is iterable np.ndaarray\n depth_next = depth + 1\n for label, level_target in zip(level.index.values, level.targets):\n if row_previous is None:\n # shown to be faster to allocate entire row width\n row = np.empty(depth_count, dtype=dtype)\n else:\n row = row_previous.copy()\n row[depth] = label\n levels.append((level_target, depth_next, row))\n\n labels.flags.writeable = False\n return labels\n\nclass IndexLevelGO(IndexLevel):\n '''Grow only variant of IndexLevel\n '''\n __slots__ = (\n 'index',\n 'targets',\n 'offset'\n )\n\n def __init__(self,\n index: IndexGO,\n targets: tp.Optional[np.ndarray] = None,\n offset: int = 0\n ):\n assert isinstance(index, IndexGO)\n # assume that we must copy this index as it is mutable; possibly add an own_index option if this can be optimized\n index = index.copy()\n IndexLevel.__init__(self, index=index, targets=targets, offset=offset)\n\n #---------------------------------------------------------------------------\n # grow only mutation\n\n def extend(self, level: IndexLevel):\n # assert isinstance(level, IndexLevelGO)\n\n depth = next(self.depths())\n if depth != next(level.depths()):\n raise Exception('level for extension does not have necessary levels.')\n\n # this will raise for duplicates\n self.index.extend(level.index.values)\n\n def target_gen():\n offset_prior = self.__len__()\n for t in level.targets:\n # only need to update offsets at this level, as lower levels are relative to this\n target = t.to_index_level(offset_prior, cls=self.__class__)\n offset_prior += len(target)\n yield target\n\n self.targets.extend(target_gen())\n\n\n def append(self, key: tuple):\n '''Add a single, full-depth leaf loc.\n '''\n # find fist depth that does not contain key\n depth_count = next(self.depths())\n\n if len(key) != depth_count:\n raise RuntimeError('appending key {} of insufficent depth {}'.format(\n key, depth_count))\n\n depth_not_found = -1\n edge_nodes = np.empty(depth_count, dtype=object)\n\n node = self\n for depth, k in enumerate(key):\n edge_nodes[depth] = node\n # only set on first encounter in descent\n if depth_not_found == -1 and not node.index.__contains__(k):\n depth_not_found = depth\n if node.targets is not None:\n node = node.targets[-1]\n\n assert depth_not_found != -1\n level_previous = None\n\n for depth in range(depth_count - 1, depth_not_found - 1, -1):\n node = edge_nodes[depth]\n k = key[depth]\n # print('key', k, 'current edge index', node.index.values)\n\n if depth == depth_not_found:\n # when at the the depth not found, we always update the index\n node.index.append(k)\n\n # if we have targets, must update them\n if node.targets is not None:\n level_previous.offset = node.__len__()\n node.targets.append(level_previous)\n\n 
else: # depth not found is higher up\n                if node.targets is None:\n                    # we are at the max depth; will need to create a LevelGO to append in the next level\n                    level_previous = IndexLevelGO(\n                            index=IndexGO((k,)),\n                            offset=0,\n                            targets=None\n                            )\n                else:\n                    # targets = np.empty(1, dtype=object)\n                    targets = ArrayGO([level_previous,], own_iterable=True)\n                    level_previous = IndexLevelGO(\n                            index=IndexGO((k,)),\n                            offset=0,\n                            targets=targets\n                            )\n\n\n\n\n"
] | [
[
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
akash-harijan/cataract-detection | [
"ccb7045290a7a002bba1ff68220d19ec3a79ea2d"
] | [
"src/models/predict_model.py"
] | [
"import tensorflow as tf\nfrom tensorflow import keras\nimport cv2\n\n\nif __name__ == \"__main__\":\n\n import os\n print(os.getcwd())\n\n model = keras.models.load_model('./../../models/final-700imgs.h5')\n img = cv2.imread('./../../data/external/Test/cataract/img315.jpeg')\n resized = cv2.resize(img, (160, 160))\n\n input_img = resized.reshape((1,)+resized.shape)\n print(input_img.shape)\n\n output = model.predict(input_img/255.0)\n print(output)\n print(\"Cataract\" if output >= 0.5 else \"Normal\")\n"
] | [
[
"tensorflow.keras.models.load_model"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
wangjiehui11235/ultron | [
"ade46fdcff7eaf01187cdf9b9fb1d6a04ae972b7"
] | [
"ultron/factor/combine/kutil.py"
] | [
"# -*- coding: utf-8 -*-\nimport pandas as pd\ndef calc_ic(factor_df, return_df, factor_list, return_col_name='target_return', ic_type='spearman'):\n \"\"\"\n 计算因子IC值, 本月和下月因子值的秩相关\n params:\n factor_df: DataFrame, columns=['ticker', 'tradeDate', factor_list]\n return_df: DataFrame, colunms=['ticker, 'tradeDate', return_col_name], 预先计算好的未来的收益率\n factor_list: list, 需要计算IC的因子名list\n return_col_name: str, return_df中的收益率列名\n method: : {'spearman', 'pearson'}, 默认'spearman', 指定计算rank IC('spearman')或者Normal IC('pearson')\n return:\n DataFrame, 返回各因子的IC序列, 列为: ['tradeDate', factor_list]\n \"\"\"\n merge_df = factor_df.merge(return_df, on=['code', 'trade_date'])\n # 遍历每个因子,计算对应的IC\n factor_ic_list = []\n for factor_name in factor_list:\n tmp_factor_ic = merge_df.groupby(['trade_date']).apply(\n lambda x: x[[factor_name, return_col_name]].corr(method=ic_type).values[0, 1])\n tmp_factor_ic.name = factor_name\n factor_ic_list.append(tmp_factor_ic)\n factor_ic_frame = pd.concat(factor_ic_list, axis=1)\n factor_ic_frame.reset_index(inplace=True)\n return factor_ic_frame\n"
] | [
[
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Datamuseum-DK/R1000.HwDoc | [
"cb0841540a4ac184a08957daac1a470b6916a663"
] | [
"ImageProcessing/schematics.py"
] | [
"#!/usr/local/bin/python3\n#\n# Copyright (c) 2021 Poul-Henning Kamp\n# All rights reserved.\n#\n# Author: Poul-Henning Kamp <[email protected]>\n#\n# SPDX-License-Identifier: BSD-2-Clause\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n\n'''\nThe main tool-chest for the project\n===================================\n\nAbout coordinates and coordinate systems.\n\nFirst, and most importantly, numpy indexes like this: Image[Y][X]\n\nThe most important coordinate system is the original \"drawing\ncoordinates\", as marked out by the arrows and numbers between the\nthick and the then borders: This is what all the components, texts\nand lines are positioned relative to.\n\nThe following have been established by counding pixels in the\noriginal images:\n\nThe image resolution of the drawing coordinates is approx 265 ± 10 DPI\n\nThe inside of the thick outer border marks the edge of the \"drawing\nsheet\", which is 23\" wide and 14.9\" tall.\n\nThe thick border was probably drawn with 0.06\" thickness: 16 px / 265 px.\n\nThe middle of the thin inner border is located 0.2\" from the edge\nof the logical sheet (ie: BL=0.2\"x0.2\" TR=22.8\"x14.7\")\n\nThe thin border was probably drawn with 0.025\" thickness: (6-7) px / 265 px.\n\nIt looks like the software used depended on a custom character-generator\nwith a 0.1\"x0.1\" pitch, driving a Versatec electrostatic matrix\nplotter, and there are artifacts consistent with the actual output\nhaving been produced on one.\n\nThis means that graphical objects could not be arbitrarily placed\non the paper, for instance all letters, digits and other glyphs\nfit in a 0.1\"x0.1\" raster.\n\n'''\n\nimport glob\nimport subprocess\n\nimport imageio\n\nimport numpy as np\n\nfrom delaunay_interpolator import Interpolator\n\nimport finagle\n\nimport components\nimport page_numbers\n\nAPPROX_DPI = 265\n\nINCH_HEIGHT = 14.9\nINCH_WIDTH = 23.0\n\n# Four corners of the logical sheet\nINCH_T_L = (INCH_HEIGHT, 0)\nINCH_T_R = (INCH_HEIGHT, INCH_WIDTH)\nINCH_B_L = (0, 0)\nINCH_B_R = (0, INCH_WIDTH)\n\n# Four corners of the thick exterior border\n# (The inner side is the logical sheet. 
Thickness is 16/265 = 0.06\")\nINCH_T_L_E = (INCH_HEIGHT + .03, -.03)\nINCH_T_R_E = (INCH_HEIGHT + .03, INCH_WIDTH + .03)\nINCH_B_L_E = (-.03, -.03)\nINCH_B_R_E = (-.03, INCH_WIDTH + .03)\n\n# Four corners of the thin interior border\nINCH_T_L_I = (INCH_HEIGHT - .2, .2)\nINCH_T_R_I = (INCH_HEIGHT - .2, INCH_WIDTH - .2)\nINCH_B_L_I = (.2, .2)\nINCH_B_R_I = (.2, INCH_WIDTH - .2)\n\n# Top left corner of documentation box\nINCH_T_L_DOCBOX = (4.10, 18.40)\n\nLINE_CORR = 20\n\nclass Point():\n\n def __init__(self, pix=(None, None), inch=(None, None)):\n self.py, self.px = pix\n self.iy, self.ix = inch\n\n def get_pix(self):\n return (self.py, self.px)\n\n def set_pix(self, pix):\n self.py, self.px = pix\n\n pix = property(get_pix, set_pix)\n\n def get_inch(self):\n return (self.iy, self.ix)\n\n def set_inch(self, inch):\n self.iy, self.ix = inch\n\n inch = property(get_inch, set_inch)\n\n def proj_pix(self, sheet):\n self.pix = sheet.inch2pix(self.inch)\n\n def proj_inch(self, sheet):\n self.inch = sheet.pix2inch(self.pix)\n\n def __str__(self):\n return \" \".join(\n (\n \"<P\",\n str(self.py),\n str(self.px),\n str(self.iy),\n str(self.ix),\n \">\",\n )\n )\n\ndef inch2proj(inch):\n return (int((INCH_HEIGHT - inch[0]) * 50), int(inch[1] * 50))\n\ndef proj2inch(pix):\n return (INCH_HEIGHT - pix[0] / 50, pix[1] / 50)\n\ndef write_image_normalized(img, fno, **kwargs):\n ''' Normalize (copy of) image to [0…255] and write to file '''\n min_val = np.amin(img)\n oimg = img - min_val\n max_val = np.amax(oimg)\n oimg *= (255. / max_val)\n imageio.imwrite(fno, oimg.astype(np.uint8), **kwargs)\n\ndef inch_window(inch, size):\n inch_tl_y = inch[0] + size[0] / 2\n inch_tl_x = inch[1] - size[1] / 2\n inch_br_y = inch_tl_y - size[0]\n inch_br_x = inch_tl_x + size[1]\n return (inch_tl_y, inch_tl_x), (inch_br_y, inch_br_x)\n\ndef window(pix, size):\n pix_tl_y = pix[0] - size[0] // 2\n pix_tl_x = pix[1] - size[1] // 2\n pix_br_y = pix_tl_y + size[0]\n pix_br_x = pix_tl_x + size[1]\n return (pix_tl_y, pix_tl_x), (pix_br_y, pix_br_x)\n\ndef snippet(image, pix_tl, pix_br):\n assert pix_tl[0] < pix_br[0]\n assert pix_tl[1] < pix_br[1]\n return image[pix_tl[0]:pix_br[0], pix_tl[1]:pix_br[1]]\n\ndef add_big_marker(image, pix_y, pix_x):\n ''' As it says in the tin... '''\n for distance, value in (\n (19, -1),\n (11, 1),\n (3, -1),\n ):\n image[\n pix_y - distance : pix_y + distance + 1,\n pix_x - distance : pix_x + distance + 1,\n ] = value\n image[pix_y][pix_x] = -value\n\ndef add_small_marker(image, pix_y, pix_x):\n ''' As it says in the tin... 
'''\n for distance, value in (\n (7, -1),\n (5, 1),\n (1, -1),\n ):\n image[\n pix_y - distance : pix_y + distance + 1,\n pix_x - distance : pix_x + distance + 1,\n ] = value\n image[pix_y][pix_x] = -value\n\nclass XY_Histogram():\n ''' A XY Histogram '''\n\n def __init__(self, img, pix_y, pix_x, dimension, gate = .66, minwidth = 5):\n ''' Find the average coordinate of the coords with the longest runs '''\n pix_y0 = pix_y - dimension // 2\n pix_x0 = pix_x - dimension // 2\n self.snippet = np.array(\n img[pix_y0 : pix_y0 + dimension, pix_x0 : pix_x0 + dimension]\n )\n histogram_x = []\n histogram_y = []\n\n for ipix_x in range(dimension):\n cur_run_x = 0\n pix_x_runs = set((0,))\n cur_run_y = 0\n pix_y_runs = set((0,))\n for ipix_y in range(dimension):\n if self.snippet[ipix_y][ipix_x] < 0:\n cur_run_x += 1\n else:\n pix_x_runs.add(cur_run_x)\n cur_run_x = 0\n if self.snippet[ipix_x][ipix_y] < 0:\n cur_run_y += 1\n else:\n pix_y_runs.add(cur_run_y)\n cur_run_y = 0\n pix_x_runs.add(cur_run_x)\n pix_y_runs.add(cur_run_y)\n histogram_x.append(max(pix_x_runs))\n histogram_y.append(max(pix_y_runs))\n\n if histogram_x:\n self.max_x = max(histogram_x)\n else:\n self.max_x = 0\n if histogram_y:\n self.max_y = max(histogram_y)\n else:\n self.max_y = 0\n\n retval=[]\n for hist in (histogram_y, histogram_x):\n if not hist:\n retval.append(2)\n continue\n\n threshold = gate * max(hist)\n # print(list(j for j in hist if j > threshold))\n pixel_sum = 0\n sum_count = 0\n for pixel, run_length in enumerate(hist):\n if run_length > threshold:\n pixel_sum += pixel\n sum_count += 1\n if sum_count >= minwidth:\n retval.append(int(pixel_sum/sum_count))\n else:\n retval.append(2)\n\n if histogram_x and histogram_y:\n add_small_marker(self.snippet, retval[0], retval[1])\n\n self.val_y = retval[0] + pix_y0\n self.val_x = retval[1] + pix_x0\n\n def __str__(self):\n return \"<XY %.2f %.2f %.2f %.2f>\" % (\n self.val_x, self.val_y,\n self.max_x, self.max_y,\n )\n\nclass LandMark(Point):\n ''' A landmark links pixel coordinates to inch coordinates '''\n\n def __init__(self, pix_y, pix_x, inch_y, inch_x, name):\n super().__init__((pix_y, pix_x), (inch_y, inch_x))\n self.name = name\n\n def __str__(self):\n return \" \".join([\n \"<LM\",\n self.name,\n str(self.py),\n str(self.px),\n str(self.iy),\n str(self.ix),\n ])\n\n def dump(self, file):\n ''' Write landmark to file '''\n file.write(\n \" \".join(\n (\n str(self.py),\n str(self.px),\n str(self.iy),\n str(self.ix),\n self.name,\n )\n ) + \"\\n\"\n )\n\ndef per_image_finagle_constants(board, sheet, cls):\n ''' Set any hardcoded properties for this specific image '''\n i = finagle.FINAGLE_PAGES.get(board)\n if not i:\n return\n i = i.get(sheet)\n if not i:\n return\n print(\"Configured Properties for\", board, sheet)\n for prop, val in i.items():\n print(\" \", prop, val)\n setattr(cls, prop, val)\n\nclass Sheet():\n\n ''' One Schematic Image file '''\n\n def __init__(self, argv):\n assert len(argv) == 3\n self.step = argv[0].split(\"_\")[1]\n self.board = argv[1]\n self.sheet = argv[2]\n self.page = page_numbers.page_number(self.board, self.sheet)\n self.sch_file = \"Proj/%s/pg_%02d.kicad_sch\" % (self.board, self.page)\n\n self.dstdir = self.board + \"/\" + self.sheet + \"/\"\n self.fn_pfx = self.dstdir + self.step + \"_\"\n\n per_image_finagle_constants(self.board, self.sheet, self)\n\n self.bom = components.BOM()\n\n self.landmarks = []\n try:\n self.load_landmarks()\n except FileNotFoundError:\n pass\n self.pix2inch_x = None\n self.pix2inch_y = None\n 
self.inch2pix_x = None\n self.inch2pix_y = None\n self.img = None\n self.proj50 = None\n\n print(\"Processing\", self.board, self.sheet, \"step\", self.step, \"in\", self.dstdir)\n\n def find_stepped_file(self, basename):\n ''' Find the most recent step for a file '''\n filename = None\n for i in sorted(glob.glob(self.dstdir + \"???_\" + basename)):\n if i < self.fn_pfx:\n filename = i\n if filename is None:\n raise FileNotFoundError(\"No (earlier) %s file found\" % basename)\n return filename\n\n def load_raw_image(self):\n ''' Load and normalize image '''\n filename = self.find_stepped_file(\"raw_image.png\")\n self.img = imageio.imread(filename).astype(np.float)\n self.img -= np.amin(self.img)\n self.img /= np.amax(self.img) * .5\n self.img -= 1\n print(\"Loaded\", filename)\n\n def load_proj50_image(self):\n ''' Load and normalize image '''\n filename = self.find_stepped_file(\"proj50.png\")\n self.proj50 = imageio.imread(filename).astype(np.float)\n self.proj50 -= np.amin(self.proj50)\n self.proj50 /= np.amax(self.proj50) * .5\n self.proj50 -= 1\n print(\"Loaded\", filename)\n\n def load_components(self):\n ''' ... '''\n filename = self.find_stepped_file(\"components.txt\")\n self.bom.load_from_file(filename)\n print(\"Loaded\", filename)\n\n def write_components(self):\n ''' ... '''\n self.bom.write_to_file(self.fn_pfx + \"components.txt\")\n\n def write_image(self, basename, img=None):\n ''' Write the image to a file '''\n filename = self.fn_pfx + basename + \".png\"\n if img is None:\n img = self.img\n print(\"Writing to\", filename)\n write_image_normalized(img, filename)\n\n def project(self, resolution):\n ''' Create a projected image with resolution pixels per inch '''\n print(\"Projecting for\", resolution, \"DPI\")\n proj_height = int(INCH_HEIGHT * resolution)\n proj_width = int(INCH_WIDTH * resolution)\n projected = np.zeros([proj_height, proj_width], dtype=np.float)\n\n for proj_y in range(proj_height):\n for proj_x in range(proj_width):\n inch_x = proj_x / resolution\n inch_y = proj_y / resolution\n pix_y, pix_x = self.inch2pix((INCH_HEIGHT - inch_y, inch_x))\n if pix_x != -9999 and pix_y != -9999:\n val = self.img[pix_y][pix_x]\n projected[proj_y][proj_x] = val\n\n filename = self.dstdir + '/proj_%d.png' % resolution\n return projected\n\n def add_grid(self, img=None):\n ''' Add inch->pix projected grid '''\n print(\"Adding grid\")\n if img is None:\n img = self.img\n pix_height, pix_width = img.shape\n if True:\n for inch_x in range(0, int(INCH_WIDTH * 100), 10):\n for inch_y in range(0, int(INCH_HEIGHT *100), 10):\n pix_y, pix_x = self.inch2pix(\n (\n inch_y * .01 + .05,\n inch_x * .01 + .05)\n )\n if pix_x < 0 or pix_y < 0 or pix_x >= pix_width or pix_y >= pix_height:\n continue\n img[pix_y][pix_x] *= -1\n for i in (1, 2):\n img[pix_y+i][pix_x] *= -1\n img[pix_y-i][pix_x] *= -1\n img[pix_y][pix_x+i] *= -1\n img[pix_y][pix_x-i] *= -1\n\n if False:\n for inch_x in range(0, int(INCH_WIDTH * 100), 10):\n for inch_y in range(0, int(INCH_HEIGHT * 100), 1):\n pix_y, pix_x = self.inch2pix((inch_y * .01 + .05, inch_x * .01 + .05))\n if pix_x < 0 or pix_y < 0 or pix_x >= pix_width or pix_y >= pix_height:\n continue\n img[pix_y-1:pix_y+1, pix_x-1:pix_x+1] *= -1\n\n if False:\n for inch_x in range(0, int(INCH_WIDTH * 100), 1):\n for inch_y in range(0, int(INCH_HEIGHT * 100), 10):\n pix_y, pix_x = self.inch2pix((inch_y * .01 + .05, inch_x *.01 + .05))\n if pix_x < 0 or pix_y < 0 or pix_x >= pix_width or pix_y >= pix_height:\n continue\n img[pix_y-1:pix_y+1, pix_x-1:pix_x+1] *= 
-1\n\n def pix2inch(self, pix):\n ''' Interpolate from pixels to inches '''\n\n if not self.pix2inch_x or len(self.pix2inch_x) != len(self.landmarks):\n\n self.pix2inch_x = Interpolator(\n [(l.py, l.px, l.ix) for l in self.landmarks if None not in (l.py, l.px, l.ix)]\n )\n\n self.pix2inch_y = Interpolator(\n [(l.py, l.px, l.iy) for l in self.landmarks if None not in (l.py, l.px, l.iy)]\n )\n\n return (self.pix2inch_y.lookup(*pix), self.pix2inch_x.lookup(*pix))\n\n def inch2pix(self, inch):\n ''' Interpolate from inches to pixels '''\n\n if not self.inch2pix_x or len(self.inch2pix_x) != len(self.landmarks):\n\n self.inch2pix_x = Interpolator(\n [(l.iy, l.ix, l.px) for l in self.landmarks if None not in (l.iy, l.ix, l.px)]\n )\n\n self.inch2pix_y = Interpolator(\n [(l.iy, l.ix, l.py) for l in self.landmarks if None not in (l.iy, l.ix, l.py)]\n )\n\n return (int(self.inch2pix_y.lookup(*inch)), int(self.inch2pix_x.lookup(*inch)))\n\n def add_landmark(self, pix_y, pix_x, inch_y, inch_x, name):\n ''' Add a landmark '''\n retval = LandMark(pix_y, pix_x, inch_y, inch_x, name)\n self.landmarks.append(retval)\n\n # When we only project one coordinate, also add an the inverse \"half\" landmark\n if pix_x is None:\n _pix_y, pix_x = self.inch2pix((inch_y, inch_x))\n lm = LandMark(pix_y, pix_x, inch_y, None, name)\n self.landmarks.append(lm)\n elif pix_y is None:\n pix_y, _pix_x = self.inch2pix((inch_y, inch_x))\n lm = LandMark(pix_y, pix_x, None, inch_x, name)\n self.landmarks.append(lm)\n elif inch_x is None:\n _inch_y, inch_x = self.pix2inch((pix_y, pix_x))\n lm = LandMark(pix_y, None, inch_y, inch_x, name)\n self.landmarks.append(lm)\n elif inch_y is None:\n inch_y, _inch_x = self.pix2inch((pix_y, pix_x))\n lm = LandMark(None, pix_x, inch_y, inch_x, name)\n self.landmarks.append(lm)\n\n return retval\n\n def write_landmarks(self):\n ''' Dump the landmarks '''\n filename = self.fn_pfx + \"landmarks.txt\"\n with open(filename, \"w\") as file:\n for i in self.landmarks:\n i.dump(file)\n print(\"Wrote\", filename)\n\n def load_landmarks(self):\n ''' Load latest landmarks from previous steps '''\n\n self.landmarks = []\n self.inch2pix_y = None\n self.inch2pix_x = None\n self.pix2inch_y = None\n self.pix2inch_x = None\n\n filename = self.find_stepped_file(\"landmarks.txt\")\n with open(filename, \"r\") as file:\n for i in file:\n j = i.split(maxsplit=5)\n l = []\n for k in j[0:4]:\n if k == \"None\":\n l.append(None)\n else:\n l.append(float(k))\n self.add_landmark(*(l), j[4])\n print(\"Loaded\", filename)\n\n def write_interpolators(self):\n ''' Dump the geometry of the interpolators '''\n self.pix2inch((0, 0))\n self.inch2pix((0, 0))\n self.pix2inch_x.dump_x(self.fn_pfx + \"pix2inch_x\", inv=(True,True))\n self.pix2inch_y.dump_y(self.fn_pfx + \"pix2inch_y\", inv=(True,False))\n self.inch2pix_x.dump_x(self.fn_pfx + \"inch2pix_x\", inv=(False,False))\n self.inch2pix_y.dump_y(self.fn_pfx + \"inch2pix_y\", inv=(False,False))\n with open(self.fn_pfx + \"_.g\", \"w\") as file:\n file.write('set term png size 800,480\\n')\n for i in (\n \"pix2inch_x\",\n \"pix2inch_y\",\n \"inch2pix_x\",\n \"inch2pix_y\",\n ):\n file.write('set output \"' + self.fn_pfx + i + '.png\"\\n')\n file.write('load \"' + self.fn_pfx + i + '.g\"\\n')\n subprocess.run([\"gnuplot\", self.fn_pfx + \"_.g\"])\n\n def write_interpolated(self):\n img = np.array(self.img)\n self.add_grid(img)\n if True:\n for landmark in self.landmarks:\n if None in (landmark.py, landmark.px):\n continue\n py = int(landmark.py)\n px = int(landmark.px)\n if 
landmark.iy is not None:\n img[py - 3, px-40:px+40] *= -1\n img[py + 3, px-40:px+40] *= -1\n if landmark.ix is not None:\n img[py-40:py+40, px - 3] *= -1\n img[py-40:py+40, px + 3] *= -1\n write_image_normalized(img, self.fn_pfx + \"landmarks.png\")\n\n def dark_median(self, pix_y, pix_x, pix_height, pix_width):\n ''' return median x & y for dark pixels in pix_x±pix_width, pix_y±pix_height window '''\n snippet = self.img[\n pix_y - pix_height : pix_y + pix_height + 1,\n pix_x - pix_width : pix_x + pix_width + 1,\n ]\n pix_x_list = []\n pix_y_list = []\n for delta_y in range(-pix_height, pix_height + 1):\n for delta_x in range(-pix_width, pix_width + 1):\n if self.img[pix_y + delta_y][pix_x + delta_x] < 0:\n pix_y_list.append(pix_y + delta_y)\n pix_x_list.append(pix_x + delta_x)\n pix_x_list.sort()\n if pix_x_list:\n pix_x_median = pix_x_list[len(pix_x_list)//2]\n else:\n pix_x_median = None\n pix_y_list.sort()\n if pix_y_list:\n pix_y_median = pix_y_list[len(pix_y_list)//2]\n else:\n pix_y_median = None\n\n if pix_x_median and pix_y_median:\n add_small_marker(\n snippet,\n pix_height + pix_y_median - pix_y,\n pix_width + pix_x_median - pix_x,\n )\n return pix_y_median, pix_x_median, snippet\n\n def hide_borders(self, img, dpi):\n ''' Paint over the drawing border and docbox '''\n\n EXTRA = .05\n\n def inch_x(x):\n return int(x * dpi)\n\n def inch_y(y):\n # inch-coords are in quadrant I, pix-coords are in IV\n return int((INCH_HEIGHT - y) * dpi)\n\n def blank(tl, br):\n img[\n inch_y(tl[0]) : inch_y(br[0]),\n inch_x(tl[1]) : inch_x(br[1])\n ] *= 0\n\n blank((INCH_B_L_I[0] + EXTRA, 0), (0, INCH_WIDTH))\n blank((INCH_HEIGHT, 0), (INCH_T_R_I[0] - EXTRA, INCH_WIDTH))\n blank((INCH_HEIGHT, 0), (0, INCH_B_L_I[1] + EXTRA))\n blank((INCH_HEIGHT, INCH_T_R_I[1] - EXTRA), (0, INCH_WIDTH))\n blank((INCH_T_L_DOCBOX[0] + EXTRA, INCH_T_L_DOCBOX[1] - EXTRA), (0, INCH_WIDTH))\n"
] | [
[
"numpy.amin",
"numpy.amax",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
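The schematic-digitising source above locates line centres by measuring runs of dark pixels per column (the XY_Histogram idea) and then averaging the columns whose runs clear a gate fraction of the best run. The snippet below is a minimal standalone sketch of that run-length/gate technique, not the project's own class; the function names and the toy 21x21 window are illustrative.

import numpy as np

def column_run_lengths(snippet: np.ndarray) -> np.ndarray:
    """Longest run of negative ("dark") pixels in each column of a 2-D float array."""
    dark = snippet < 0
    best = np.zeros(snippet.shape[1], dtype=int)
    for x in range(snippet.shape[1]):
        run = longest = 0
        for is_dark in dark[:, x]:
            run = run + 1 if is_dark else 0
            longest = max(longest, run)
        best[x] = longest
    return best

def stroke_centre_x(snippet: np.ndarray, gate: float = 0.66) -> float:
    """Average column index of columns whose longest dark run exceeds gate * max run."""
    runs = column_run_lengths(snippet)
    threshold = gate * runs.max()
    cols = np.nonzero(runs > threshold)[0]
    return float(cols.mean()) if cols.size else float("nan")

# toy usage: a window with a 3-pixel-wide dark stroke centred on column 10
demo = np.ones((21, 21))
demo[:, 9:12] = -1.0
print(stroke_centre_x(demo))   # ~10.0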
willianfco/shap | [
"895a796b20cb2ab6b158a4cd4326d8f4d00ca615"
] | [
"shap/explainers/_kernel.py"
] | [
"from ..utils._legacy import convert_to_instance, convert_to_model, match_instance_to_data, match_model_to_data\nfrom ..utils._legacy import convert_to_instance_with_index, convert_to_link, IdentityLink, convert_to_data, DenseData, SparseData\nfrom ..utils import safe_isinstance\nfrom scipy.special import binom\nfrom scipy.sparse import issparse\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport logging\nimport copy\nimport itertools\nimport warnings\nimport gc\nfrom sklearn.linear_model import LassoLarsIC, Lasso, lars_path\nfrom tqdm.auto import tqdm\nfrom ._explainer import Explainer\n\nlog = logging.getLogger('shap')\n\n\n\nclass Kernel(Explainer):\n \"\"\"Uses the Kernel SHAP method to explain the output of any function.\n\n Kernel SHAP is a method that uses a special weighted linear regression\n to compute the importance of each feature. The computed importance values\n are Shapley values from game theory and also coefficents from a local linear\n regression.\n\n\n Parameters\n ----------\n model : function or iml.Model\n User supplied function that takes a matrix of samples (# samples x # features) and\n computes a the output of the model for those samples. The output can be a vector\n (# samples) or a matrix (# samples x # model outputs).\n\n data : numpy.array or pandas.DataFrame or shap.common.DenseData or any scipy.sparse matrix\n The background dataset to use for integrating out features. To determine the impact\n of a feature, that feature is set to \"missing\" and the change in the model output\n is observed. Since most models aren't designed to handle arbitrary missing data at test\n time, we simulate \"missing\" by replacing the feature with the values it takes in the\n background dataset. So if the background dataset is a simple sample of all zeros, then\n we would approximate a feature being missing by setting it to zero. For small problems\n this background dataset can be the whole training set, but for larger problems consider\n using a single reference value or using the kmeans function to summarize the dataset.\n Note: for sparse case we accept any sparse matrix but convert to lil format for\n performance.\n\n link : \"identity\" or \"logit\"\n A generalized linear model link to connect the feature importance values to the model\n output. 
Since the feature importance values, phi, sum up to the model output, it often makes\n sense to connect them to the output with a link function where link(output) = sum(phi).\n If the model output is a probability then the LogitLink link function makes the feature\n importance values have log-odds units.\n\n Examples\n --------\n See :ref:`Kernel Explainer Examples <kernel_explainer_examples>`\n \"\"\"\n\n def __init__(self, model, data, link=IdentityLink(), **kwargs):\n\n # convert incoming inputs to standardized iml objects\n self.link = convert_to_link(link)\n self.model = convert_to_model(model)\n self.keep_index = kwargs.get(\"keep_index\", False)\n self.keep_index_ordered = kwargs.get(\"keep_index_ordered\", False)\n self.data = convert_to_data(data, keep_index=self.keep_index)\n model_null = match_model_to_data(self.model, self.data)\n\n # enforce our current input type limitations\n assert isinstance(self.data, DenseData) or isinstance(self.data, SparseData), \\\n \"Shap explainer only supports the DenseData and SparseData input currently.\"\n assert not self.data.transposed, \"Shap explainer does not support transposed DenseData or SparseData currently.\"\n\n # warn users about large background data sets\n if len(self.data.weights) > 100:\n log.warning(\"Using \" + str(len(self.data.weights)) + \" background data samples could cause \" +\n \"slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to \" +\n \"summarize the background as K samples.\")\n\n # init our parameters\n self.N = self.data.data.shape[0]\n self.P = self.data.data.shape[1]\n self.linkfv = np.vectorize(self.link.f)\n self.nsamplesAdded = 0\n self.nsamplesRun = 0\n\n # find E_x[f(x)]\n if isinstance(model_null, (pd.DataFrame, pd.Series)):\n model_null = np.squeeze(model_null.values)\n if safe_isinstance(model_null, \"tensorflow.python.framework.ops.EagerTensor\"):\n model_null = model_null.numpy()\n self.fnull = np.sum((model_null.T * self.data.weights).T, 0)\n self.expected_value = self.linkfv(self.fnull)\n\n # see if we have a vector output\n self.vector_out = True\n if len(self.fnull.shape) == 0:\n self.vector_out = False\n self.fnull = np.array([self.fnull])\n self.D = 1\n self.expected_value = float(self.expected_value)\n else:\n self.D = self.fnull.shape[0]\n\n\n def shap_values(self, X, **kwargs):\n \"\"\" Estimate the SHAP values for a set of samples.\n\n Parameters\n ----------\n X : numpy.array or pandas.DataFrame or any scipy.sparse matrix\n A matrix of samples (# samples x # features) on which to explain the model's output.\n\n nsamples : \"auto\" or int\n Number of times to re-evaluate the model when explaining each prediction. More samples\n lead to lower variance estimates of the SHAP values. The \"auto\" setting uses\n `nsamples = 2 * X.shape[1] + 2048`.\n\n l1_reg : \"num_features(int)\", \"auto\" (default for now, but deprecated), \"aic\", \"bic\", or float\n The l1 regularization to use for feature selection (the estimation procedure is based on\n a debiased lasso). The auto option currently uses \"aic\" when less that 20% of the possible sample\n space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF \"auto\" WILL CHANGE\n in a future version to be based on num_features instead of AIC.\n The \"aic\" and \"bic\" options use the AIC and BIC rules for regularization.\n Using \"num_features(int)\" selects a fix number of top features. 
Passing a float directly sets the\n \"alpha\" parameter of the sklearn.linear_model.Lasso model used for feature selection.\n \n gc_collect : bool\n Run garbage collection after each explanation round. Sometime needed for memory intensive explanations (default False).\n\n Returns\n -------\n array or list\n For models with a single output this returns a matrix of SHAP values\n (# samples x # features). Each row sums to the difference between the model output for that\n sample and the expected value of the model output (which is stored as expected_value\n attribute of the explainer). For models with vector outputs this returns a list\n of such matrices, one for each output.\n \"\"\"\n\n # convert dataframes\n if str(type(X)).endswith(\"pandas.core.series.Series'>\"):\n X = X.values\n elif str(type(X)).endswith(\"'pandas.core.frame.DataFrame'>\"):\n if self.keep_index:\n index_value = X.index.values\n index_name = X.index.name\n column_name = list(X.columns)\n X = X.values\n\n x_type = str(type(X))\n arr_type = \"'numpy.ndarray'>\"\n # if sparse, convert to lil for performance\n if sp.sparse.issparse(X) and not sp.sparse.isspmatrix_lil(X):\n X = X.tolil()\n assert x_type.endswith(arr_type) or sp.sparse.isspmatrix_lil(X), \"Unknown instance type: \" + x_type\n assert len(X.shape) == 1 or len(X.shape) == 2, \"Instance must have 1 or 2 dimensions!\"\n\n # single instance\n if len(X.shape) == 1:\n data = X.reshape((1, X.shape[0]))\n if self.keep_index:\n data = convert_to_instance_with_index(data, column_name, index_name, index_value)\n explanation = self.explain(data, **kwargs)\n\n # vector-output\n s = explanation.shape\n if len(s) == 2:\n outs = [np.zeros(s[0]) for j in range(s[1])]\n for j in range(s[1]):\n outs[j] = explanation[:, j]\n return outs\n\n # single-output\n else:\n out = np.zeros(s[0])\n out[:] = explanation\n return out\n\n # explain the whole dataset\n elif len(X.shape) == 2:\n explanations = []\n for i in tqdm(range(X.shape[0]), disable=kwargs.get(\"silent\", False)):\n data = X[i:i + 1, :]\n if self.keep_index:\n data = convert_to_instance_with_index(data, column_name, index_value[i:i + 1], index_name)\n explanations.append(self.explain(data, **kwargs))\n if kwargs.get(\"gc_collect\", False):\n gc.collect()\n\n # vector-output\n s = explanations[0].shape\n if len(s) == 2:\n outs = [np.zeros((X.shape[0], s[0])) for j in range(s[1])]\n for i in range(X.shape[0]):\n for j in range(s[1]):\n outs[j][i] = explanations[i][:, j]\n return outs\n\n # single-output\n else:\n out = np.zeros((X.shape[0], s[0]))\n for i in range(X.shape[0]):\n out[i] = explanations[i]\n return out\n\n def explain(self, incoming_instance, **kwargs):\n # convert incoming input to a standardized iml object\n instance = convert_to_instance(incoming_instance)\n match_instance_to_data(instance, self.data)\n\n # find the feature groups we will test. 
If a feature does not change from its\n # current value then we know it doesn't impact the model\n self.varyingInds = self.varying_groups(instance.x)\n if self.data.groups is None:\n self.varyingFeatureGroups = np.array([i for i in self.varyingInds])\n self.M = self.varyingFeatureGroups.shape[0]\n else:\n self.varyingFeatureGroups = [self.data.groups[i] for i in self.varyingInds]\n self.M = len(self.varyingFeatureGroups)\n groups = self.data.groups\n # convert to numpy array as it is much faster if not jagged array (all groups of same length)\n if self.varyingFeatureGroups and all(len(groups[i]) == len(groups[0]) for i in self.varyingInds):\n self.varyingFeatureGroups = np.array(self.varyingFeatureGroups)\n # further performance optimization in case each group has a single value\n if self.varyingFeatureGroups.shape[1] == 1:\n self.varyingFeatureGroups = self.varyingFeatureGroups.flatten()\n\n # find f(x)\n if self.keep_index:\n model_out = self.model.f(instance.convert_to_df())\n else:\n model_out = self.model.f(instance.x)\n if isinstance(model_out, (pd.DataFrame, pd.Series)):\n model_out = model_out.values\n self.fx = model_out[0]\n\n if not self.vector_out:\n self.fx = np.array([self.fx])\n\n # if no features vary then no feature has an effect\n if self.M == 0:\n phi = np.zeros((self.data.groups_size, self.D))\n phi_var = np.zeros((self.data.groups_size, self.D))\n\n # if only one feature varies then it has all the effect\n elif self.M == 1:\n phi = np.zeros((self.data.groups_size, self.D))\n phi_var = np.zeros((self.data.groups_size, self.D))\n diff = self.link.f(self.fx) - self.link.f(self.fnull)\n for d in range(self.D):\n phi[self.varyingInds[0],d] = diff[d]\n\n # if more than one feature varies then we have to do real work\n else:\n self.l1_reg = kwargs.get(\"l1_reg\", \"auto\")\n\n # pick a reasonable number of samples if the user didn't specify how many they wanted\n self.nsamples = kwargs.get(\"nsamples\", \"auto\")\n if self.nsamples == \"auto\":\n self.nsamples = 2 * self.M + 2**11\n\n # if we have enough samples to enumerate all subsets then ignore the unneeded samples\n self.max_samples = 2 ** 30\n if self.M <= 30:\n self.max_samples = 2 ** self.M - 2\n if self.nsamples > self.max_samples:\n self.nsamples = self.max_samples\n\n # reserve space for some of our computations\n self.allocate()\n\n # weight the different subset sizes\n num_subset_sizes = np.int(np.ceil((self.M - 1) / 2.0))\n num_paired_subset_sizes = np.int(np.floor((self.M - 1) / 2.0))\n weight_vector = np.array([(self.M - 1.0) / (i * (self.M - i)) for i in range(1, num_subset_sizes + 1)])\n weight_vector[:num_paired_subset_sizes] *= 2\n weight_vector /= np.sum(weight_vector)\n log.debug(\"weight_vector = {0}\".format(weight_vector))\n log.debug(\"num_subset_sizes = {0}\".format(num_subset_sizes))\n log.debug(\"num_paired_subset_sizes = {0}\".format(num_paired_subset_sizes))\n log.debug(\"M = {0}\".format(self.M))\n\n # fill out all the subset sizes we can completely enumerate\n # given nsamples*remaining_weight_vector[subset_size]\n num_full_subsets = 0\n num_samples_left = self.nsamples\n group_inds = np.arange(self.M, dtype='int64')\n mask = np.zeros(self.M)\n remaining_weight_vector = copy.copy(weight_vector)\n for subset_size in range(1, num_subset_sizes + 1):\n\n # determine how many subsets (and their complements) are of the current size\n nsubsets = binom(self.M, subset_size)\n if subset_size <= num_paired_subset_sizes: nsubsets *= 2\n log.debug(\"subset_size = {0}\".format(subset_size))\n 
log.debug(\"nsubsets = {0}\".format(nsubsets))\n log.debug(\"self.nsamples*weight_vector[subset_size-1] = {0}\".format(\n num_samples_left * remaining_weight_vector[subset_size - 1]))\n log.debug(\"self.nsamples*weight_vector[subset_size-1]/nsubsets = {0}\".format(\n num_samples_left * remaining_weight_vector[subset_size - 1] / nsubsets))\n\n # see if we have enough samples to enumerate all subsets of this size\n if num_samples_left * remaining_weight_vector[subset_size - 1] / nsubsets >= 1.0 - 1e-8:\n num_full_subsets += 1\n num_samples_left -= nsubsets\n\n # rescale what's left of the remaining weight vector to sum to 1\n if remaining_weight_vector[subset_size - 1] < 1.0:\n remaining_weight_vector /= (1 - remaining_weight_vector[subset_size - 1])\n\n # add all the samples of the current subset size\n w = weight_vector[subset_size - 1] / binom(self.M, subset_size)\n if subset_size <= num_paired_subset_sizes: w /= 2.0\n for inds in itertools.combinations(group_inds, subset_size):\n mask[:] = 0.0\n mask[np.array(inds, dtype='int64')] = 1.0\n self.addsample(instance.x, mask, w)\n if subset_size <= num_paired_subset_sizes:\n mask[:] = np.abs(mask - 1)\n self.addsample(instance.x, mask, w)\n else:\n break\n log.info(\"num_full_subsets = {0}\".format(num_full_subsets))\n\n # add random samples from what is left of the subset space\n nfixed_samples = self.nsamplesAdded\n samples_left = self.nsamples - self.nsamplesAdded\n log.debug(\"samples_left = {0}\".format(samples_left))\n if num_full_subsets != num_subset_sizes:\n remaining_weight_vector = copy.copy(weight_vector)\n remaining_weight_vector[:num_paired_subset_sizes] /= 2 # because we draw two samples each below\n remaining_weight_vector = remaining_weight_vector[num_full_subsets:]\n remaining_weight_vector /= np.sum(remaining_weight_vector)\n log.info(\"remaining_weight_vector = {0}\".format(remaining_weight_vector))\n log.info(\"num_paired_subset_sizes = {0}\".format(num_paired_subset_sizes))\n ind_set = np.random.choice(len(remaining_weight_vector), 4 * samples_left, p=remaining_weight_vector)\n ind_set_pos = 0\n used_masks = {}\n while samples_left > 0 and ind_set_pos < len(ind_set):\n mask.fill(0.0)\n ind = ind_set[ind_set_pos] # we call np.random.choice once to save time and then just read it here\n ind_set_pos += 1\n subset_size = ind + num_full_subsets + 1\n mask[np.random.permutation(self.M)[:subset_size]] = 1.0\n\n # only add the sample if we have not seen it before, otherwise just\n # increment a previous sample's weight\n mask_tuple = tuple(mask)\n new_sample = False\n if mask_tuple not in used_masks:\n new_sample = True\n used_masks[mask_tuple] = self.nsamplesAdded\n samples_left -= 1\n self.addsample(instance.x, mask, 1.0)\n else:\n self.kernelWeights[used_masks[mask_tuple]] += 1.0\n\n # add the compliment sample\n if samples_left > 0 and subset_size <= num_paired_subset_sizes:\n mask[:] = np.abs(mask - 1)\n\n # only add the sample if we have not seen it before, otherwise just\n # increment a previous sample's weight\n if new_sample:\n samples_left -= 1\n self.addsample(instance.x, mask, 1.0)\n else:\n # we know the compliment sample is the next one after the original sample, so + 1\n self.kernelWeights[used_masks[mask_tuple] + 1] += 1.0\n\n # normalize the kernel weights for the random samples to equal the weight left after\n # the fixed enumerated samples have been already counted\n weight_left = np.sum(weight_vector[num_full_subsets:])\n log.info(\"weight_left = {0}\".format(weight_left))\n 
self.kernelWeights[nfixed_samples:] *= weight_left / self.kernelWeights[nfixed_samples:].sum()\n\n # execute the model on the synthetic samples we have created\n self.run()\n\n # solve then expand the feature importance (Shapley value) vector to contain the non-varying features\n phi = np.zeros((self.data.groups_size, self.D))\n phi_var = np.zeros((self.data.groups_size, self.D))\n for d in range(self.D):\n vphi, vphi_var = self.solve(self.nsamples / self.max_samples, d)\n phi[self.varyingInds, d] = vphi\n phi_var[self.varyingInds, d] = vphi_var\n\n if not self.vector_out:\n phi = np.squeeze(phi, axis=1)\n phi_var = np.squeeze(phi_var, axis=1)\n\n return phi\n\n @staticmethod\n def not_equal(i, j):\n if isinstance(i, str) or isinstance(j, str):\n return 0 if i == j else 1\n return 0 if np.isclose(i, j, equal_nan=True) else 1\n\n def varying_groups(self, x):\n if not sp.sparse.issparse(x):\n varying = np.zeros(self.data.groups_size)\n for i in range(0, self.data.groups_size):\n inds = self.data.groups[i]\n x_group = x[0, inds]\n if sp.sparse.issparse(x_group):\n if all(j not in x.nonzero()[1] for j in inds):\n varying[i] = False\n continue\n x_group = x_group.todense()\n num_mismatches = np.sum(np.frompyfunc(self.not_equal, 2, 1)(x_group, self.data.data[:, inds]))\n varying[i] = num_mismatches > 0\n varying_indices = np.nonzero(varying)[0]\n return varying_indices\n else:\n varying_indices = []\n # go over all nonzero columns in background and evaluation data\n # if both background and evaluation are zero, the column does not vary\n varying_indices = np.unique(np.union1d(self.data.data.nonzero()[1], x.nonzero()[1]))\n remove_unvarying_indices = []\n for i in range(0, len(varying_indices)):\n varying_index = varying_indices[i]\n # now verify the nonzero values do vary\n data_rows = self.data.data[:, [varying_index]]\n nonzero_rows = data_rows.nonzero()[0]\n\n if nonzero_rows.size > 0:\n background_data_rows = data_rows[nonzero_rows]\n if sp.sparse.issparse(background_data_rows):\n background_data_rows = background_data_rows.toarray()\n num_mismatches = np.sum(np.abs(background_data_rows - x[0, varying_index]) > 1e-7)\n # Note: If feature column non-zero but some background zero, can't remove index\n if num_mismatches == 0 and not \\\n (np.abs(x[0, [varying_index]][0, 0]) > 1e-7 and len(nonzero_rows) < data_rows.shape[0]):\n remove_unvarying_indices.append(i)\n mask = np.ones(len(varying_indices), dtype=bool)\n mask[remove_unvarying_indices] = False\n varying_indices = varying_indices[mask]\n return varying_indices\n\n def allocate(self):\n if sp.sparse.issparse(self.data.data):\n # We tile the sparse matrix in csr format but convert it to lil\n # for performance when adding samples\n shape = self.data.data.shape\n nnz = self.data.data.nnz\n data_rows, data_cols = shape\n rows = data_rows * self.nsamples\n shape = rows, data_cols\n if nnz == 0:\n self.synth_data = sp.sparse.csr_matrix(shape, dtype=self.data.data.dtype).tolil()\n else:\n data = self.data.data.data\n indices = self.data.data.indices\n indptr = self.data.data.indptr\n last_indptr_idx = indptr[len(indptr) - 1]\n indptr_wo_last = indptr[:-1]\n new_indptrs = []\n for i in range(0, self.nsamples - 1):\n new_indptrs.append(indptr_wo_last + (i * last_indptr_idx))\n new_indptrs.append(indptr + ((self.nsamples - 1) * last_indptr_idx))\n new_indptr = np.concatenate(new_indptrs)\n new_data = np.tile(data, self.nsamples)\n new_indices = np.tile(indices, self.nsamples)\n self.synth_data = sp.sparse.csr_matrix((new_data, new_indices, 
new_indptr), shape=shape).tolil()\n else:\n self.synth_data = np.tile(self.data.data, (self.nsamples, 1))\n\n self.maskMatrix = np.zeros((self.nsamples, self.M))\n self.kernelWeights = np.zeros(self.nsamples)\n self.y = np.zeros((self.nsamples * self.N, self.D))\n self.ey = np.zeros((self.nsamples, self.D))\n self.lastMask = np.zeros(self.nsamples)\n self.nsamplesAdded = 0\n self.nsamplesRun = 0\n if self.keep_index:\n self.synth_data_index = np.tile(self.data.index_value, self.nsamples)\n\n def addsample(self, x, m, w):\n offset = self.nsamplesAdded * self.N\n if isinstance(self.varyingFeatureGroups, (list,)):\n for j in range(self.M):\n for k in self.varyingFeatureGroups[j]:\n if m[j] == 1.0:\n self.synth_data[offset:offset+self.N, k] = x[0, k]\n else:\n # for non-jagged numpy array we can significantly boost performance\n mask = m == 1.0\n groups = self.varyingFeatureGroups[mask]\n if len(groups.shape) == 2:\n for group in groups:\n self.synth_data[offset:offset+self.N, group] = x[0, group]\n else:\n # further performance optimization in case each group has a single feature\n evaluation_data = x[0, groups]\n # In edge case where background is all dense but evaluation data\n # is all sparse, make evaluation data dense\n if sp.sparse.issparse(x) and not sp.sparse.issparse(self.synth_data):\n evaluation_data = evaluation_data.toarray()\n self.synth_data[offset:offset+self.N, groups] = evaluation_data\n self.maskMatrix[self.nsamplesAdded, :] = m\n self.kernelWeights[self.nsamplesAdded] = w\n self.nsamplesAdded += 1\n\n def run(self):\n num_to_run = self.nsamplesAdded * self.N - self.nsamplesRun * self.N\n data = self.synth_data[self.nsamplesRun*self.N:self.nsamplesAdded*self.N,:]\n if self.keep_index:\n index = self.synth_data_index[self.nsamplesRun*self.N:self.nsamplesAdded*self.N]\n index = pd.DataFrame(index, columns=[self.data.index_name])\n data = pd.DataFrame(data, columns=self.data.group_names)\n data = pd.concat([index, data], axis=1).set_index(self.data.index_name)\n if self.keep_index_ordered:\n data = data.sort_index()\n modelOut = self.model.f(data)\n if isinstance(modelOut, (pd.DataFrame, pd.Series)):\n modelOut = modelOut.values\n self.y[self.nsamplesRun * self.N:self.nsamplesAdded * self.N, :] = np.reshape(modelOut, (num_to_run, self.D))\n\n # find the expected value of each output\n for i in range(self.nsamplesRun, self.nsamplesAdded):\n eyVal = np.zeros(self.D)\n for j in range(0, self.N):\n eyVal += self.y[i * self.N + j, :] * self.data.weights[j]\n\n self.ey[i, :] = eyVal\n self.nsamplesRun += 1\n\n def solve(self, fraction_evaluated, dim):\n eyAdj = self.linkfv(self.ey[:, dim]) - self.link.f(self.fnull[dim])\n s = np.sum(self.maskMatrix, 1)\n\n # do feature selection if we have not well enumerated the space\n nonzero_inds = np.arange(self.M)\n log.debug(\"fraction_evaluated = {0}\".format(fraction_evaluated))\n # if self.l1_reg == \"auto\":\n # warnings.warn(\n # \"l1_reg=\\\"auto\\\" is deprecated and in the next version (v0.29) the behavior will change from a \" \\\n # \"conditional use of AIC to simply \\\"num_features(10)\\\"!\"\n # )\n if (self.l1_reg not in [\"auto\", False, 0]) or (fraction_evaluated < 0.2 and self.l1_reg == \"auto\"):\n w_aug = np.hstack((self.kernelWeights * (self.M - s), self.kernelWeights * s))\n log.info(\"np.sum(w_aug) = {0}\".format(np.sum(w_aug)))\n log.info(\"np.sum(self.kernelWeights) = {0}\".format(np.sum(self.kernelWeights)))\n w_sqrt_aug = np.sqrt(w_aug)\n eyAdj_aug = np.hstack((eyAdj, eyAdj - (self.link.f(self.fx[dim]) - 
self.link.f(self.fnull[dim]))))\n eyAdj_aug *= w_sqrt_aug\n mask_aug = np.transpose(w_sqrt_aug * np.transpose(np.vstack((self.maskMatrix, self.maskMatrix - 1))))\n #var_norms = np.array([np.linalg.norm(mask_aug[:, i]) for i in range(mask_aug.shape[1])])\n\n # select a fixed number of top features\n if isinstance(self.l1_reg, str) and self.l1_reg.startswith(\"num_features(\"):\n r = int(self.l1_reg[len(\"num_features(\"):-1])\n nonzero_inds = lars_path(mask_aug, eyAdj_aug, max_iter=r)[1]\n\n # use an adaptive regularization method\n elif self.l1_reg == \"auto\" or self.l1_reg == \"bic\" or self.l1_reg == \"aic\":\n c = \"aic\" if self.l1_reg == \"auto\" else self.l1_reg\n nonzero_inds = np.nonzero(LassoLarsIC(criterion=c).fit(mask_aug, eyAdj_aug).coef_)[0]\n\n # use a fixed regularization coeffcient\n else:\n nonzero_inds = np.nonzero(Lasso(alpha=self.l1_reg).fit(mask_aug, eyAdj_aug).coef_)[0]\n\n if len(nonzero_inds) == 0:\n return np.zeros(self.M), np.ones(self.M)\n\n # eliminate one variable with the constraint that all features sum to the output\n eyAdj2 = eyAdj - self.maskMatrix[:, nonzero_inds[-1]] * (\n self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim]))\n etmp = np.transpose(np.transpose(self.maskMatrix[:, nonzero_inds[:-1]]) - self.maskMatrix[:, nonzero_inds[-1]])\n log.debug(\"etmp[:4,:] {0}\".format(etmp[:4, :]))\n\n # solve a weighted least squares equation to estimate phi\n tmp = np.transpose(np.transpose(etmp) * np.transpose(self.kernelWeights))\n etmp_dot = np.dot(np.transpose(tmp), etmp)\n try:\n tmp2 = np.linalg.inv(etmp_dot)\n except np.linalg.LinAlgError:\n tmp2 = np.linalg.pinv(etmp_dot)\n warnings.warn(\n \"Linear regression equation is singular, Moore-Penrose pseudoinverse is used instead of the regular inverse.\\n\"\n \"To use regular inverse do one of the following:\\n\"\n \"1) turn up the number of samples,\\n\"\n \"2) turn up the L1 regularization with num_features(N) where N is less than the number of samples,\\n\"\n \"3) group features together to reduce the number of inputs that need to be explained.\"\n )\n w = np.dot(tmp2, np.dot(np.transpose(tmp), eyAdj2))\n log.debug(\"np.sum(w) = {0}\".format(np.sum(w)))\n log.debug(\"self.link(self.fx) - self.link(self.fnull) = {0}\".format(\n self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim])))\n log.debug(\"self.fx = {0}\".format(self.fx[dim]))\n log.debug(\"self.link(self.fx) = {0}\".format(self.link.f(self.fx[dim])))\n log.debug(\"self.fnull = {0}\".format(self.fnull[dim]))\n log.debug(\"self.link(self.fnull) = {0}\".format(self.link.f(self.fnull[dim])))\n phi = np.zeros(self.M)\n phi[nonzero_inds[:-1]] = w\n phi[nonzero_inds[-1]] = (self.link.f(self.fx[dim]) - self.link.f(self.fnull[dim])) - sum(w)\n log.info(\"phi = {0}\".format(phi))\n\n # clean up any rounding errors\n for i in range(self.M):\n if np.abs(phi[i]) < 1e-10:\n phi[i] = 0\n\n return phi, np.ones(len(phi))\n"
] | [
[
"numpy.sqrt",
"numpy.squeeze",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.hstack",
"scipy.sparse.issparse",
"numpy.reshape",
"numpy.arange",
"scipy.sparse.isspmatrix_lil",
"numpy.frompyfunc",
"sklearn.linear_model.lars_path",
"sklearn.linear_model.Lasso",
"numpy.ceil",
"numpy.zeros",
"numpy.isclose",
"pandas.concat",
"numpy.nonzero",
"numpy.linalg.inv",
"scipy.special.binom",
"scipy.sparse.csr_matrix",
"numpy.floor",
"numpy.transpose",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.tile",
"numpy.ones",
"sklearn.linear_model.LassoLarsIC",
"numpy.linalg.pinv",
"numpy.vectorize",
"numpy.random.permutation",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
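The Kernel SHAP explainer above spends most of Kernel.explain() building a weight vector over coalition (subset) sizes: size s gets total weight proportional to (M - 1) / (s * (M - s)), paired sizes below M/2 are doubled because each enumerated mask also covers its complement, and the vector is normalised to sum to 1. The sketch below reproduces just that weighting in isolation so it can be inspected; the helper names are mine, not shap API.

import numpy as np
from scipy.special import binom

def kernel_subset_weights(M: int) -> np.ndarray:
    """Normalised Shapley-kernel weight per subset size, as in Kernel.explain()."""
    num_subset_sizes = int(np.ceil((M - 1) / 2.0))
    num_paired = int(np.floor((M - 1) / 2.0))
    w = np.array([(M - 1.0) / (s * (M - s)) for s in range(1, num_subset_sizes + 1)])
    w[:num_paired] *= 2            # sizes below M/2 also stand in for their complements
    return w / w.sum()

def per_subset_weight(M: int, s: int, weights: np.ndarray) -> float:
    """Weight assigned to one individual mask of size s (complement pairs halved)."""
    w = weights[s - 1] / binom(M, s)
    if s <= (M - 1) // 2:
        w /= 2.0
    return w

weights = kernel_subset_weights(M=6)
print(weights)                          # most mass on the smallest/largest coalitions
print(per_subset_weight(6, 1, weights))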
supercatex/ML_Turtorial | [
"d84d62e3bf55fcbaa5bfd90e2df929c87cfb4cdc"
] | [
"dataset/main.py"
] | [
"import os\nimport cv2\nimport numpy as np\n\n\n_INPUT_DIR = \"./images/\"\n_OUTPUT_DIR = \"./output/\"\n_TEST_DIR = \"./test/\"\n_NUM_OF_SAMPLES = 3000\n_SAMPLE_SIZE = (100, 100)\n\n\ndef add_noise(img):\n h, w, c = img.shape\n if c != 4:\n raise Exception(\"Only PNG format supported!\")\n\n dst = img.copy()\n\n tmp = dst[:, :, 0:3]\n hsv = cv2.cvtColor(tmp, cv2.COLOR_BGR2HSV)\n p1 = np.random.randint(255 - 35, 255 + 35) / 255\n hsv[:, :, 1] = np.array(hsv[:, :, 1] * p1, dtype=np.uint8)\n p2 = np.random.randint(255 - 100, 255) / 255\n hsv[:, :, 2] = np.array(hsv[:, :, 2] * p2, dtype=np.uint8)\n tmp = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n dst[:, :, 0:3] = tmp\n\n # for _ in range(int(h * w * 0.1)):\n # y = np.random.randint(0, h)\n # x = np.random.randint(0, w)\n # dst[y, x] = np.random.randint(0, 255, 4)\n # dst[y, x, 3] = 255\n return dst\n\n\ndef generate_image(img):\n h, w, c = img.shape\n if c != 4:\n raise Exception(\"Only PNG format supported!\")\n\n pts1 = np.float32([[0, 0], [h, 0], [0, w], [h, w]])\n pts2 = np.float32([\n [np.random.randint(0, h / 4), np.random.randint(0, w / 4)],\n [h - np.random.randint(0, h / 4), np.random.randint(0, w / 4)],\n [np.random.randint(0, h / 4), w - np.random.randint(0, w / 4)],\n [h - np.random.randint(0, h / 4), w - np.random.randint(0, w / 4)]\n ])\n\n m = cv2.getPerspectiveTransform(pts1, pts2)\n dst = cv2.warpPerspective(img, m, (w, h))\n tmp = dst.copy()\n\n bg_list = os.listdir(\"./bg/\")\n bg_filename = np.random.choice(bg_list, 1)[0]\n bg = cv2.imread(os.path.join(\"./bg\", bg_filename), cv2.IMREAD_UNCHANGED)\n rh = np.random.randint(0, bg.shape[0] - h)\n rw = np.random.randint(0, bg.shape[1] - w)\n bg = bg[rh:rh+h, rw:rw+w, :]\n\n # r = np.random.randint(0, 255, (h, w, c), dtype=np.uint8)\n # r[:, :, c - 1] = 255\n r = bg\n for i in range(c - 1):\n tmp[:, :, i] = cv2.bitwise_and(r[:, :, i], 255 - dst[:, :, c - 1])\n dst[:, :, i] = cv2.bitwise_and(dst[:, :, i], dst[:, :, c - 1])\n dst += tmp\n\n return dst\n\n\ndef run():\n global _INPUT_DIR, _OUTPUT_DIR, _TEST_DIR, _NUM_OF_SAMPLES, _SAMPLE_SIZE\n\n if not os.path.exists(_TEST_DIR):\n os.makedirs(_TEST_DIR)\n\n n = 0\n for f in os.listdir(_INPUT_DIR):\n n += 1\n print(\"Processing:\", f, n)\n\n label_dir = os.path.join(_OUTPUT_DIR, f.split(\".\")[0][1:].zfill(3))\n if not os.path.exists(label_dir):\n os.makedirs(label_dir)\n\n image_filename = os.path.join(_INPUT_DIR, f)\n image = cv2.imread(image_filename, cv2.IMREAD_UNCHANGED)\n\n for i in range(_NUM_OF_SAMPLES):\n img = add_noise(image)\n img = generate_image(img)\n img = cv2.resize(img, _SAMPLE_SIZE)\n img_filename = os.path.join(label_dir, \"%d.jpg\" % i)\n cv2.imwrite(img_filename, img)\n del img\n del img_filename\n\n for i in range(int(_NUM_OF_SAMPLES * 0.01)):\n img = add_noise(image)\n img = generate_image(img)\n img = cv2.resize(img, _SAMPLE_SIZE)\n img_filename = os.path.join(_TEST_DIR, \"%d_%d.jpg\" % (n, i))\n cv2.imwrite(img_filename, img)\n del img\n del img_filename\n\n\nif __name__ == \"__main__\":\n run()\n"
] | [
[
"numpy.random.choice",
"numpy.array",
"numpy.float32",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
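The dataset generator above augments each template by nudging its four corners inward by random amounts and warping through the resulting homography (generate_image). A compact standalone version of that perspective jitter is sketched below with made-up image content and output path; the jitter fraction is an assumption, not the tutorial's exact values.

import cv2
import numpy as np

def random_perspective(img: np.ndarray, max_shift: float = 0.25) -> np.ndarray:
    """Warp img by moving each corner inward by a random fraction of its size."""
    h, w = img.shape[:2]
    # source corners in (x, y) order: top-left, top-right, bottom-left, bottom-right
    src = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
    jitter = lambda limit: np.random.randint(0, int(limit * max_shift) + 1)
    dst = np.float32([
        [jitter(w),     jitter(h)],
        [w - jitter(w), jitter(h)],
        [jitter(w),     h - jitter(h)],
        [w - jitter(w), h - jitter(h)],
    ])
    m = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, m, (w, h))

if __name__ == "__main__":
    sample = np.full((120, 160, 3), 255, dtype=np.uint8)     # plain white test image
    cv2.rectangle(sample, (40, 30), (120, 90), (0, 0, 255), -1)
    cv2.imwrite("warped_demo.jpg", random_perspective(sample))  # output name is illustrative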
rontho1992/election_feed | [
"5987a388e23bff33cf37a2923d21dcac5075adbb"
] | [
"parappa/strategies.py"
] | [
"from datetime import datetime as dt\nimport pandas as pd\nimport csv\n\nclass ElectionStrategies:\n\n def __init(self):\n pass\n\n def ap_init(self,state, results, party):\n no_fips = [\"NH\"]\n fips = [\"IA\",\"SC\",\"NV\"]\n self.state = state\n\n if self.state in no_fips:\n self.fips_process = False\n elif self.state in fips:\n self.fips_process = True\n else:\n pass\n\n filename= \"ap_{}_{}_{}.csv\".format(party,state,dt.now().strftime(\"%Y%m%d%H%M%S\"))\n f = open(filename, \"wb+\")\n\n if self.fips_process == False:\n self.__nofips_processing(results,f)\n elif self.fips_process == True:\n self.__fips_processing(results,f)\n else:\n pass\n\n return filename\n\n def widen_table(self,filename):\n initial_results = pd.read_csv(filename)\n initial_results.sort_values([\"reportingunitName\",\"candidateLast\"],\n inplace=True\n )\n wide_results = initial_results.pivot_table(index=[\"reportingunitName\",\n \"fipsCode\",\n \"precinctsTotal\",\n \"precinctsReporting\"\n ],\n columns=\"candidateLast\"\n )\n wide_results[\"state\"] = self.state\n\n wide_results.to_csv(\"apwide.csv\", header = False)\n\n\n def ms_processing(self, election_json):\n filename = \"ms_IA_{}.csv\".format(dt.now().strftime(\"%Y%m%d%H%M%S\"))\n f = open(filename, \"wb+\")\n results = csv.writer(f)\n\n results.writerow([\"county\",\n \"fipscode\",\n \"precinct\",\n \"candidate\",\n \"votes\",\n \"isWinner\",\n \"WinPercentage\"\n ])\n\n for precinct in election_json:\n candidates = precinct[\"Candidates\"]\n\n for candidate in candidates:\n results.writerow([precinct[\"County\"][\"Name\"],\n precinct[\"County\"][\"FIPSCode\"],\n precinct[\"Precinct\"][\"Name\"],\n candidate[\"Candidate\"][\"LastName\"],\n candidate.get(\"Result\", 0),\n candidate.get(\"IsWinner\", False),\n candidate.get(\"WinPercentage\", 0)\n ])\n\n f.close()\n\n ms_df = pd.read_csv(filename)\n\n return filename\n\n############################# PRIVATE FUNCTIONS ###############################\n### These functions are used to process data so that Tableau can properly ###\n### map the geographies. For states that have reporting units without FIPS ###\n### they need to have the state abbreviation in the table. ###\n################################################################################\n\n def __fips_processing(self, election_json, f):\n results = csv.writer(f)\n results.writerow([\"reportingunitName\",\n \"fipsCode\",\n \"precinctsTotal\",\n \"precinctsReporting\",\n \"candidateLast\",\n \"voteCount\",\n \"state\"\n ])\n\n for ru in election_json:\n candidates = ru[\"candidates\"]\n for candidate in candidates:\n results.writerow([ru[\"reportingunitName\"],\n ru[\"fipsCode\"],\n ru[\"precinctsTotal\"],\n ru[\"precinctsReporting\"],\n candidate[\"last\"],\n candidate[\"voteCount\"],\n self.state\n ])\n\n f.close()\n\n def __nofips_processing(self, election_json, f):\n results = csv.writer(f)\n results.writerow([\"reportingunitName\",\n \"fipsCode\",\n \"precinctsTotal\",\n \"precinctsReporting\",\n \"candidateLast\",\n \"voteCount\",\n \"state\"\n ])\n\n for ru in election_json:\n candidates = ru[\"candidates\"]\n for candidate in candidates:\n results.writerow([ru[\"reportingunitName\"].upper(),\n ru[\"fipsCode\"],\n ru[\"precinctsTotal\"],\n ru[\"precinctsReporting\"],\n candidate[\"last\"],\n candidate[\"voteCount\"],\n self.state\n ])\n f.close()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
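widen_table() in the election-feed strategies reshapes the long per-candidate CSV into one row per reporting unit with a vote column per candidate via pandas.pivot_table. The tiny illustration below uses fabricated rows and explicitly passes values="voteCount" for clarity (the original pivots all numeric columns); the county names and counts are placeholders.

import pandas as pd

long_results = pd.DataFrame({
    "reportingunitName": ["Adams", "Adams", "Boone", "Boone"],
    "fipsCode": [19001, 19001, 19015, 19015],
    "precinctsTotal": [5, 5, 12, 12],
    "precinctsReporting": [5, 5, 11, 11],
    "candidateLast": ["Smith", "Jones", "Smith", "Jones"],
    "voteCount": [120, 98, 431, 512],
})

wide = long_results.pivot_table(
    index=["reportingunitName", "fipsCode", "precinctsTotal", "precinctsReporting"],
    columns="candidateLast",
    values="voteCount",
)
wide["state"] = "IA"   # widen_table stamps self.state; "IA" here is a placeholder
print(wide)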
NISystemsEngineering/rpyc-rfmx-remoting | [
"b90c7b84a61a7d063680bcc692bc8c32374291dd"
] | [
"rpyc/client/RFmxNRULModAccSingleCarrier.py"
] | [
"# DEMO EXAMPLE: Client side execution of RFmx NR on a remote server\r\nfrom pathlib import PureWindowsPath\r\nimport rpyc.utils.classic\r\nfrom matplotlib import pyplot\r\n\r\n# # # # # User Parameters # # # # # #\r\nhost_name = 'semoore-pxi' # Name or IP address of the RPyC server\r\nhost_port = 18861 # Port number of the RPyC server\r\ngenerator_resource_name = 'VST2_01' # Resource name of the Generator on the Host\r\nanalyzer_resource_name = 'VST2_01' # Resource name of the Analyzer on the Host\r\nwaveform_folder = 'c://niremote/waveforms/' # Example: waveformFolder = \"c:/niremote/waveforms\" , '.' denotes same directory as this .py file, if running on the server\r\nwaveform_file_name = 'nr100.tdms' # Name of the .tdms waveform to play\r\n # Note that in this example all of the waveforms are on the server and need to be pathed as such\r\n\r\n# # # # # RPyC Configuration # # # # #\r\n# Open connection to the service\r\nprint(\"Opening connection to RFmxService on \" + host_name)\r\nconn = rpyc.connect(host_name, host_port)\r\nprint(\"Connection opened successfully\")\r\n\r\n# Instantiate exported classes from the service\r\nprint(\"Importing classes from service..\", end='')\r\nSystem = conn.root.System\r\nNationalInstruments = conn.root.NationalInstruments\r\nNIRfsg = conn.root.NIRfsg\r\nNIRfsgPlayback = conn.root.NIRfsgPlayback\r\nInstrMX = conn.root.InstrMX\r\nNRMX = conn.root.NRMX\r\nprint(\"done\")\r\n\r\nprint(\"Setting measurement parameters..\", end='')\r\n# # # # # Global settings # # # # #\r\ncenter_frequency = 3.5e9\r\n\r\n# # # # # Generation settings # # # # #\r\nrfsg_selected_ports = \"\"\r\nrfsg_power_level = -10\r\nrfsg_external_attenuation = 0\r\nrfsg_reference_clock_source = NIRfsg.RfsgFrequencyReferenceSource.OnboardClock\r\nrfsg_waveform_name = \"waveform\"\r\nrfsg_script = 'script GenerateWaveform repeat forever generate waveform marker0(0) end repeat end script'\r\nrfsg_automatic_shared_lo = NIRfsgPlayback.RfsgPlaybackAutomaticSGSASharedLO.Enabled\r\n\r\n# # # # # Analysis settings # # # # #\r\ninstr_selected_ports = \"\"\r\ninstr_frequency_reference_source = InstrMX.RFmxInstrMXConstants.OnboardClock\r\ninstr_frequency_reference_frequency = 10e6\r\ninstr_automatic_shared_lo = InstrMX.RFmxInstrMXAutomaticSGSASharedLO.Enabled\r\n\r\nnr_reference_level = 0.0\r\nnr_external_attenuation = 0.0\r\n\r\nnr_enable_trigger = True\r\nnr_digital_edge_source = NRMX.RFmxNRMXConstants.PxiTriggerLine0\r\nnr_digital_edge = NRMX.RFmxNRMXDigitalEdgeTriggerEdge.Rising\r\nnr_trigger_delay = 0.0\r\n\r\nnr_frequency_range = NRMX.RFmxNRMXFrequencyRange.Range1\r\nnr_band = 78\r\nnr_cell_id = 0\r\nnr_carrier_bandwidth = 100e6\r\nnr_subcarrier_spacing = 30e3\r\n \r\nnr_auto_resource_block_detection_enabled = True\r\n\r\nnr_pusch_transform_precoding_enabled = False\r\nnr_pusch_modulation_type = NRMX.RFmxNRMXPuschModulationType.Qpsk\r\nnr_number_of_resource_block_clusters = 1\r\nnr_pusch_resource_block_offset = [0]\r\nnr_pusch_number_of_resource_blocks = [-1]\r\nnr_pusch_slot_allocation = \"0-Last\"\r\nnr_pusch_symbol_allocation = \"0-Last\"\r\n\r\nnr_pusch_dmrs_power_mode = NRMX.RFmxNRMXPuschDmrsPowerMode.CdmGroups\r\nnr_pusch_dmrs_power = 0.0\r\nnr_pusch_dmrs_configuration_type = NRMX.RFmxNRMXPuschDmrsConfigurationType.Type1\r\nnr_pusch_mapping_type = NRMX.RFmxNRMXPuschMappingType.TypeA\r\nnr_pusch_dmrs_type_a_position = 2\r\nnr_pusch_dmrs_duration = NRMX.RFmxNRMXPuschDmrsDuration.SingleSymbol\r\nnr_pusch_dmrs_additional_positions = 0\r\n\r\nnr_synchronization_mode = 
NRMX.RFmxNRMXModAccSynchronizationMode.Slot\r\n\r\nnr_measurement_length_unit = NRMX.RFmxNRMXModAccMeasurementLengthUnit.Slot\r\nnr_measurement_offset = 0\r\nnr_measurement_length = 1\r\n\r\nnr_averaging_enabled = False\r\nnr_averaging_count = 10\r\n\r\nnr_measurement_timeout = 10.0\r\nprint(\"done\")\r\n\r\n# # # # # Execute Measurements # # # # #\r\n\r\n# Initialize Generator\r\nprint(\"Initializing generator..\", end='')\r\nrfsg_session = NIRfsg.NIRfsg(generator_resource_name, True, False, \"\")\r\nrfsg_session.SignalPath.SelectedPorts = rfsg_selected_ports\r\nrfsg_handle = rfsg_session.GetInstrumentHandle().DangerousGetHandle()\r\nprint(\"done\")\r\n\r\n# Initialize Analyzer\r\nprint(\"Initializing analyzer..\", end='')\r\ninstr_session = InstrMX.RFmxInstrMX.GetSession(analyzer_resource_name, \"\")\r\ninstr_session.ResetEntireSession()\r\nprint(\"done\")\r\n\r\n# Generation configuration\r\nprint(\"Configuring generator..\", end='')\r\nrfsg_session.RF.Configure(center_frequency, rfsg_power_level)\r\nrfsg_session.FrequencyReference.Configure(rfsg_reference_clock_source, 10e6)\r\nrfsg_session.RF.ExternalGain = -rfsg_external_attenuation\r\n# The server will be running on Windows and needs a PureWindowsPath to locate the waveform\r\nNIRfsgPlayback.NIRfsgPlayback.ReadAndDownloadWaveformFromFile(rfsg_handle, str(PureWindowsPath(waveform_folder, waveform_file_name)), rfsg_waveform_name)\r\nNIRfsgPlayback.NIRfsgPlayback.StoreAutomaticSGSASharedLO(rfsg_handle, \"\", rfsg_automatic_shared_lo)\r\nNIRfsgPlayback.NIRfsgPlayback.SetScriptToGenerateSingleRfsg(rfsg_handle, rfsg_script)\r\nrfsg_session.DeviceEvents.MarkerEvents[0].ExportedOutputTerminal = NIRfsg.RfsgMarkerEventExportedOutputTerminal.PxiTriggerLine0\r\nprint(\"done\")\r\n\r\n# Measurement configuration\r\nprint(\"Configuring analyzer..\", end='')\r\n\r\n# Get NR Handle\r\nnr = InstrMX.RFmxNRMXExtension.GetNRSignalConfiguration(instr_session)\r\ninstr_session.ConfigureFrequencyReference(\"\", instr_frequency_reference_source, instr_frequency_reference_frequency)\r\ninstr_session.ConfigureAutomaticSGSASharedLO(\"\", instr_automatic_shared_lo)\r\n\r\n# Configure the NR Measurement\r\nnr.SetSelectedPorts(\"\", instr_selected_ports)\r\nnr.ConfigureRF(\"\", center_frequency, nr_reference_level, nr_external_attenuation)\r\nnr.ConfigureDigitalEdgeTrigger(\"\", nr_digital_edge_source, nr_digital_edge, nr_trigger_delay, nr_enable_trigger)\r\n\r\nnr.SetFrequencyRange(\"\", nr_frequency_range)\r\nnr.ComponentCarrier.SetBandwidth(\"\", nr_carrier_bandwidth)\r\nnr.ComponentCarrier.SetCellID(\"\", nr_cell_id)\r\nnr.SetBand(\"\", nr_band)\r\nnr.ComponentCarrier.SetBandwidthPartSubcarrierSpacing(\"\", nr_subcarrier_spacing)\r\nnr.SetAutoResourceBlockDetectionEnabled(\"\", nr_auto_resource_block_detection_enabled)\r\n\r\nnr.ComponentCarrier.SetPuschTransformPrecodingEnabled(\"\", nr_pusch_transform_precoding_enabled)\r\nnr.ComponentCarrier.SetPuschSlotAllocation(\"\", nr_pusch_slot_allocation)\r\nnr.ComponentCarrier.SetPuschSymbolAllocation(\"\", nr_pusch_symbol_allocation)\r\nnr.ComponentCarrier.SetPuschModulationType(\"\", nr_pusch_modulation_type)\r\n\r\nnr.ComponentCarrier.SetPuschNumberOfResourceBlockClusters(\"\", nr_number_of_resource_block_clusters)\r\n\r\nnr_subblock_string = NRMX.RFmxNRMX.BuildSubblockString(\"\", 0)\r\nnr_carrier_string = NRMX.RFmxNRMX.BuildCarrierString(nr_subblock_string, 0)\r\nnr_bandwidth_part_string = NRMX.RFmxNRMX.BuildBandwidthPartString(nr_carrier_string, 0)\r\nnr_user_string = 
NRMX.RFmxNRMX.BuildUserString(nr_bandwidth_part_string, 0)\r\nnr_pusch_string = NRMX.RFmxNRMX.BuildPuschString(nr_user_string, 0)\r\nfor i in range(nr_number_of_resource_block_clusters):\r\n nr_pusch_cluster_string = NRMX.RFmxNRMX.BuildPuschClusterString(nr_pusch_string, i)\r\n nr.ComponentCarrier.SetPuschResourceBlockOffset(nr_pusch_cluster_string, nr_pusch_resource_block_offset[i])\r\n nr.ComponentCarrier.SetPuschNumberOfResourceBlocks(nr_pusch_cluster_string, nr_pusch_number_of_resource_blocks[i])\r\n\r\nnr.ComponentCarrier.SetPuschDmrsPowerMode(\"\", nr_pusch_dmrs_power_mode)\r\nnr.ComponentCarrier.SetPuschDmrsPower(\"\", nr_pusch_dmrs_power)\r\nnr.ComponentCarrier.SetPuschDmrsConfigurationType(\"\", nr_pusch_dmrs_configuration_type)\r\nnr.ComponentCarrier.SetPuschMappingType(\"\", nr_pusch_mapping_type)\r\nnr.ComponentCarrier.SetPuschDmrsTypeAPosition(\"\", nr_pusch_dmrs_type_a_position)\r\nnr.ComponentCarrier.SetPuschDmrsDuration(\"\", nr_pusch_dmrs_duration)\r\nnr.ComponentCarrier.SetPuschDmrsAdditionalPositions(\"\", nr_pusch_dmrs_additional_positions)\r\nnr.SelectMeasurements(\"\", NRMX.RFmxNRMXMeasurementTypes.ModAcc, True)\r\n\r\nnr.ModAcc.Configuration.SetSynchronizationMode(\"\", nr_synchronization_mode)\r\nnr.ModAcc.Configuration.SetAveragingEnabled(\"\", nr_averaging_enabled)\r\nnr.ModAcc.Configuration.SetAveragingCount(\"\", nr_averaging_count)\r\n\r\nnr.ModAcc.Configuration.SetMeasurementLengthUnit(\"\", nr_measurement_length_unit)\r\nnr.ModAcc.Configuration.SetMeasurementOffset(\"\", nr_measurement_offset)\r\nnr.ModAcc.Configuration.SetMeasurementLength(\"\", nr_measurement_length)\r\n\r\nprint(\"done\")\r\n\r\n# Initiate and fetch results\r\nprint(\"Acquiring signal..\", end='')\r\nrfsg_session.Initiate()\r\nnr.Initiate(\"\", \"\")\r\ninstr_session.WaitForAcquisitionComplete(10)\r\nrfsg_session.Abort()\r\nprint('done')\r\n\r\n# _ Ignores the first returned value of the function. In this case it is an error code. 
Errors will still throw exceptions.\r\nprint('Fetching scalar results..', end='')\r\n_, composite_rms_evm_mean = nr.ModAcc.Results.GetCompositeRmsEvmMean(\"\", 0.0)\r\n_, composite_peak_evm_maximum = nr.ModAcc.Results.GetCompositePeakEvmMaximum(\"\", 0.0)\r\n_, composite_peak_evm_slot_index = nr.ModAcc.Results.GetCompositePeakEvmSlotIndex(\"\", 0)\r\n_, composite_peak_evm_symbol_index = nr.ModAcc.Results.GetCompositePeakEvmSymbolIndex(\"\", 0)\r\n_, composite_peak_evm_subcarrier_index = nr.ModAcc.Results.GetCompositePeakEvmSubcarrierIndex(\"\", 0)\r\n\r\n_, component_carrier_frequency_error_mean = nr.ModAcc.Results.GetComponentCarrierFrequencyErrorMean(\"\", 0.0)\r\n_, component_carrier_iq_origin_offset_mean = nr.ModAcc.Results.GetComponentCarrierIQOriginOffsetMean(\"\", 0.0)\r\n_, component_carrier_iq_gain_imbalance_mean = nr.ModAcc.Results.GetComponentCarrierIQGainImbalanceMean(\"\", 0.0)\r\n_, component_carrier_quadrature_error_mean = nr.ModAcc.Results.GetComponentCarrierQuadratureErrorMean(\"\", 0.0)\r\n_, in_band_emission_margin = nr.ModAcc.Results.GetInBandEmissionMargin(\"\", 0.0)\r\nprint('done')\r\n\r\n# print scalar results\r\nprint()\r\nprint(\"------------------Measurement------------------\")\r\nprint(\"Composite RMS EVM Mean (%) : {0}\".format(composite_rms_evm_mean))\r\nprint(\"Composite Peak EVM Maximum (%) : {0}\".format(composite_peak_evm_maximum))\r\nprint(\"Composite Peak EVM Slot Index : {0}\".format(composite_peak_evm_slot_index))\r\nprint(\"Composite Peak EVM Symbol Index : {0}\".format(composite_peak_evm_symbol_index))\r\nprint(\"Composite Peak EVM Subcarrier Index : {0}\".format(composite_peak_evm_subcarrier_index))\r\nprint(\"Component Carrier Frequency Error Mean (Hz) : {0}\".format(component_carrier_frequency_error_mean))\r\nprint(\"Component Carrier IQ Origin Offset Mean (dBc) : {0}\".format(component_carrier_iq_origin_offset_mean))\r\nprint(\"Component Carrier IQ Gain Imbalance Mean (dB) : {0}\".format(component_carrier_iq_gain_imbalance_mean))\r\nprint(\"Component Carrier Quadrature Error Mean (deg) : {0}\".format(component_carrier_quadrature_error_mean))\r\nprint()\r\n\r\n# Fetch traces\r\nprint('Fetching traces..', end='')\r\n_, pusch_data_constellation_trace = nr.ModAcc.Results.FetchPuschDataConstellationTrace(\"\", 10.0, None)\r\npusch_data_constellation_trace = conn.root.decompose_trace(pusch_data_constellation_trace)\r\npusch_data_constellation_trace = rpyc.utils.classic.obtain(pusch_data_constellation_trace)\r\n\r\n_, pusch_dmrs_constellation = nr.ModAcc.Results.FetchPuschDmrsConstellationTrace(\"\", 10.0, None)\r\npusch_dmrs_constellation = conn.root.decompose_trace(pusch_dmrs_constellation)\r\npusch_dmrs_constellation = rpyc.utils.classic.obtain(pusch_dmrs_constellation)\r\n\r\n_, rms_evm_per_subcarrier_mean = nr.ModAcc.Results.FetchRmsEvmPerSubcarrierMeanTrace(\"\", 10.0, None)\r\nrms_evm_per_subcarrier_mean = conn.root.decompose_trace(rms_evm_per_subcarrier_mean)\r\nrms_evm_per_subcarrier_mean = rpyc.utils.classic.obtain(rms_evm_per_subcarrier_mean)\r\n\r\n_, rms_evm_per_symbol_mean = nr.ModAcc.Results.FetchRmsEvmPerSymbolMeanTrace(\"\", 10.0, None)\r\nrms_evm_per_symbol_mean = conn.root.decompose_trace(rms_evm_per_symbol_mean)\r\nrms_evm_per_symbol_mean = rpyc.utils.classic.obtain(rms_evm_per_symbol_mean)\r\n\r\n_, spectral_flatness, spectral_flatness_lower_mask, spectral_flatness_upper_mask = \\\r\n nr.ModAcc.Results.FetchSpectralFlatnessTrace(\"\", 10.0, None, None, None)\r\nspectral_flatness = 
conn.root.decompose_trace(spectral_flatness)\r\nspectral_flatness = rpyc.utils.classic.obtain(spectral_flatness)\r\nspectral_flatness_lower_mask = conn.root.decompose_trace(spectral_flatness_lower_mask)\r\nspectral_flatness_lower_mask = rpyc.utils.classic.obtain(spectral_flatness_lower_mask)\r\nspectral_flatness_upper_mask = conn.root.decompose_trace(spectral_flatness_upper_mask)\r\nspectral_flatness_upper_mask = rpyc.utils.classic.obtain(spectral_flatness_upper_mask)\r\nprint('done')\r\n\r\n\r\n# plot results\r\ndef ramp(t0, dt, samples):\r\n return [t0 + dt * n for n in range(samples)]\r\n\r\n\r\nfig, axs = pyplot.subplots(2, 2)\r\naxs[0, 0].plot([iq.real for iq in pusch_dmrs_constellation], [iq.imag for iq in pusch_dmrs_constellation], 'r.')\r\naxs[0, 0].plot([iq.real for iq in pusch_data_constellation_trace], [iq.imag for iq in pusch_data_constellation_trace], 'g.')\r\naxs[0, 0].set_title('Constellation')\r\naxs[0, 1].plot(ramp(*rms_evm_per_subcarrier_mean[0:2], len(rms_evm_per_subcarrier_mean[2])), rms_evm_per_subcarrier_mean[2])\r\naxs[0, 1].set_title('RMS EVM / Subcarrier')\r\naxs[1, 0].plot(ramp(*rms_evm_per_symbol_mean[0:2], len(rms_evm_per_symbol_mean[2])), rms_evm_per_symbol_mean[2])\r\naxs[1, 0].set_title('RMS EVM / Symbol')\r\naxs[1, 1].plot(ramp(*spectral_flatness[0:2], len(spectral_flatness[2])), spectral_flatness[2])\r\naxs[1, 1].plot(ramp(*spectral_flatness_lower_mask[0:2], len(spectral_flatness_lower_mask[2])), spectral_flatness_lower_mask[2])\r\naxs[1, 1].plot(ramp(*spectral_flatness_upper_mask[0:2], len(spectral_flatness_upper_mask[2])), spectral_flatness_upper_mask[2])\r\naxs[1, 1].set_title('Spectral Flatness')\r\npyplot.show()\r\n\r\n# Close instrument sessions\r\ninstr_session.Close()\r\nrfsg_session.Close()\r\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
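The RFmx client above fetches each trace as a (start, increment, data) triple and rebuilds the x-axis with its ramp() helper before plotting. The fragment below shows just that plotting step with synthetic stand-in values instead of a live instrument; the axis labels and fake EVM numbers are illustrative only.

from matplotlib import pyplot

def ramp(t0: float, dt: float, samples: int) -> list:
    """Rebuild the x-axis for a trace described by (start, increment, length)."""
    return [t0 + dt * n for n in range(samples)]

trace_x0, trace_dx = 0.0, 1.0                     # symbol-index axis (illustrative)
trace_y = [1.2, 1.1, 1.3, 1.0, 1.4, 1.2, 1.1]     # fake RMS EVM (%) per symbol

pyplot.plot(ramp(trace_x0, trace_dx, len(trace_y)), trace_y, marker="o")
pyplot.xlabel("Symbol index")
pyplot.ylabel("RMS EVM (%)")
pyplot.title("Synthetic EVM-per-symbol trace")
pyplot.show()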
S-Manglik/gs-quant | [
"af22aa8574571db45ddc2a9627d25a26bd00e09b"
] | [
"gs_quant/timeseries/econometrics.py"
] | [
"# Copyright 2018 Goldman Sachs.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n# Chart Service will attempt to make public functions (not prefixed with _) from this module available. Such functions\n# should be fully documented: docstrings should describe parameters and the return value, and provide a 1-line\n# description. Type annotations should be provided for parameters.\nfrom .analysis import LagMode, lag\nfrom .statistics import *\nfrom ..errors import *\nfrom typing import Union\nimport numpy as np\nimport pandas as pd\nfrom gs_quant.api.gs.data import GsDataApi\nfrom gs_quant.data import DataContext\nfrom gs_quant.datetime.date import DayCountConvention\nfrom gs_quant.markets.securities import Asset\nfrom gs_quant.target.common import Currency\nfrom gs_quant.timeseries.datetime import align\n\n\"\"\"\nEconometrics timeseries library is for standard economic and time series analytics operations, including returns,\ndiffs, lags, volatilities and other numerical operations which are generally finance-oriented\n\"\"\"\n\n\nclass AnnualizationFactor(IntEnum):\n DAILY = 252\n WEEKLY = 52\n SEMI_MONTHLY = 26\n MONTHLY = 12\n QUARTERLY = 4\n ANNUALLY = 1\n\n\nclass SharpeAssets(Enum):\n USD = 'MAP35DA6K5B1YXGX'\n AUD = 'MAFRZWJ790MQY0EW'\n CHF = 'MAS0NN4ZX7NYXB36'\n EUR = 'MA95W0N1214395N8'\n GBP = 'MA41ZEFTWR8Q7HBM'\n JPY = 'MA8GXV3SJ0TXH1JV'\n SEK = 'MAGNZZY0GJ4TATNG'\n\n\ndef excess_returns_pure(price_series: pd.Series, spot_curve: pd.Series) -> pd.Series:\n curve, bench_curve = align(price_series, spot_curve, Interpolate.INTERSECT)\n\n e_returns = [curve.iloc[0]]\n for i in range(1, len(curve)):\n multiplier = 1 + curve.iloc[i] / curve.iloc[i - 1] - bench_curve.iloc[i] / bench_curve.iloc[i - 1]\n e_returns.append(e_returns[-1] * multiplier)\n return pd.Series(e_returns, index=curve.index)\n\n\ndef excess_returns(price_series: pd.Series, benchmark_or_rate: Union[Asset, Currency, float], *,\n day_count_convention=DayCountConvention.ACTUAL_360) -> pd.Series:\n if isinstance(benchmark_or_rate, float):\n er = [price_series.iloc[0]]\n for j in range(1, len(price_series)):\n fraction = day_count_fraction(price_series.index[j - 1], price_series.index[j], day_count_convention)\n er.append(er[-1] + price_series.iloc[j] - price_series.iloc[j - 1] * (1 + benchmark_or_rate * fraction))\n return pd.Series(er, index=price_series.index)\n\n if isinstance(benchmark_or_rate, Currency):\n try:\n marquee_id = SharpeAssets[benchmark_or_rate.value].value\n except KeyError:\n raise MqValueError(f\"unsupported currency {benchmark_or_rate}\")\n else:\n marquee_id = benchmark_or_rate.get_marquee_id()\n\n with DataContext(price_series.index[0], price_series.index[-1]):\n q = GsDataApi.build_market_data_query([marquee_id], QueryType.SPOT)\n df = GsDataApi.get_market_data(q)\n if df.empty:\n raise MqValueError(f'could not retrieve risk-free rate {marquee_id}')\n df = df[~df.index.duplicated(keep='first')] # handle bad data (duplicate rows)\n\n return excess_returns_pure(price_series, df['spot'])\n\n\ndef 
_annualized_return(levels: pd.Series, rolling: Union[int, pd.DateOffset],\n interpolation_method: Interpolate = Interpolate.NAN) -> pd.Series:\n if isinstance(rolling, pd.DateOffset):\n starting = [tstamp - rolling for tstamp in levels.index]\n levels = interpolate(levels, method=interpolation_method)\n points = list(\n map(lambda d, v, i: pow(v / levels.get(i, np.nan),\n 365.25 / (d - i).days) - 1,\n levels.index[1:],\n levels.values[1:], starting[1:]))\n else:\n if interpolation_method is not Interpolate.NAN:\n raise MqValueError(f'If w is not a relative date, method must be nan. You specified method: '\n f'{interpolation_method.value}.')\n starting = [0] * rolling\n starting.extend([a for a in range(1, len(levels) - rolling + 1)])\n points = list(\n map(lambda d, v, i: pow(v / levels[i], 365.25 / (d - levels.index[i]).days) - 1, levels.index[1:],\n levels.values[1:], starting[1:]))\n points.insert(0, 0)\n return pd.Series(points, index=levels.index)\n\n\ndef get_ratio_pure(er: pd.Series, w: Union[Window, int, str],\n interpolation_method: Interpolate = Interpolate.NAN) -> pd.Series:\n w = normalize_window(er, w or None) # continue to support 0 as an input for window\n ann_return = _annualized_return(er, w.w, interpolation_method=interpolation_method)\n long_enough = (er.index[-1] - w.w) >= er.index[0] if isinstance(w.w, pd.DateOffset) else w.w < len(er)\n ann_vol = volatility(er, w).iloc[1:] if long_enough else volatility(er)\n result = ann_return / ann_vol * 100\n return apply_ramp(result, w)\n\n\ndef _get_ratio(input_series: pd.Series, benchmark_or_rate: Union[Asset, float, str], w: Union[Window, int, str], *,\n day_count_convention: DayCountConvention, curve_type: CurveType = CurveType.PRICES,\n interpolation_method: Interpolate = Interpolate.NAN) -> pd.Series:\n if curve_type == CurveType.PRICES:\n er = excess_returns(input_series, benchmark_or_rate, day_count_convention=day_count_convention)\n else:\n assert curve_type == CurveType.EXCESS_RETURNS\n er = input_series\n\n return get_ratio_pure(er, w, interpolation_method)\n\n\nclass RiskFreeRateCurrency(Enum):\n USD = \"USD\"\n AUD = \"AUD\"\n CHF = \"CHF\"\n EUR = \"EUR\"\n GBP = \"GBP\"\n JPY = \"JPY\"\n SEK = \"SEK\"\n _USD = \"usd\"\n _AUD = \"aud\"\n _CHF = \"chf\"\n _EUR = \"eur\"\n _GBP = \"gbp\"\n _JPY = \"jpy\"\n _SEK = \"sek\"\n\n\n@plot_session_function\ndef excess_returns_(price_series: pd.Series, currency: RiskFreeRateCurrency = RiskFreeRateCurrency.USD) -> pd.Series:\n \"\"\"\n Calculate excess returns\n\n :param price_series: price series\n :param currency: currency for risk-free rate, defaults to USD\n :return: excess returns\n\n **Usage**\n\n Given a price series P and risk-free rate R, excess returns E are defined as:\n\n :math:`E_t = E_{t-1} + P_t - P_{t-1} * (1 + R * (D_t - D_{t-1}) / 360)`\n\n The `Actual/360 <https://en.wikipedia.org/wiki/Day_count_convention#Actual/360>`_ day count convention is used.\n\n **Examples**\n\n Get excess returns from a price series.\n\n >>> er = excess_returns(generate_series(100), USD)\n \"\"\"\n return excess_returns(price_series, Currency(currency.value), day_count_convention=DayCountConvention.ACTUAL_360)\n\n\n@plot_session_function\ndef sharpe_ratio(series: pd.Series, currency: RiskFreeRateCurrency = RiskFreeRateCurrency.USD,\n w: Union[Window, int, str] = None, curve_type: CurveType = CurveType.PRICES,\n method: Interpolate = Interpolate.NAN) -> pd.Series:\n \"\"\"\n Calculate Sharpe ratio\n\n :param series: series of prices or excess returns for an asset\n :param currency: 
currency for risk-free rate, defaults to USD\n :param w: Window or int: size of window and ramp up to use. e.g. Window(22, 10) where 22 is the window size\n and 10 the ramp up value. If w is a string, it should be a relative date like '1m', '1d', etc.\n Window size defaults to length of series.\n :param curve_type: whether input series is of prices or excess returns, defaults to prices\n :param method: interpolation method (default: nan). Used to calculate returns on dates without data (i.e. weekends)\n when window is a relative date. Defaults to no interpolation.\n :return: Sharpe ratio\n\n **Usage**\n\n Given a price series P, risk-free rate R, and window of size w returns the rolling\n `Sharpe ratio <https://en.wikipedia.org/wiki/Sharpe_ratio>`_ S:\n\n :math:`S_t = \\\\frac{(E_t / E_{t-w+1})^{365.25 / (D_t - D_{t-w})}-1}{volatility(E, w)_t}`\n\n Excess returns E are defined as:\n\n :math:`E_t = E_{t-1} + P_t - P_{t-1} * (1 + R * (D_t - D_{t-1}) / 360)`\n\n where D is the date for a data point. The\n `Actual/360 <https://en.wikipedia.org/wiki/Day_count_convention#Actual/360>`_ day count convention is used.\n\n **Examples**\n\n Get rolling sharpe ratio of a price series (with window of 22).\n\n >>> sr = sharpe_ratio(generate_series(365, END_TODAY), USD, 22, PRICES)\n\n **See also**\n\n :func:`volatility`\n \"\"\"\n return _get_ratio(series, Currency(currency.value), w, day_count_convention=DayCountConvention.ACTUAL_360,\n curve_type=curve_type, interpolation_method=method)\n\n\n@plot_function\ndef returns(series: pd.Series, obs: Union[Window, int, str] = 1, type: Returns = Returns.SIMPLE) -> pd.Series:\n \"\"\"\n Calculate returns from price series\n\n :param series: time series of prices\n :param obs: number of observations or relative date e.g. 3d, 1w, 1m\n :param type: returns type: simple, logarithmic or absolute\n :return: date-based time series of return\n\n **Usage**\n\n Compute returns series from price levels, based on the value of *type*:\n\n =========== =============================\n Type Description\n =========== =============================\n simple Simple arithmetic returns\n logarithmic Logarithmic returns\n absolute Absolute returns\n =========== =============================\n\n *Simple*\n\n Simple geometric change in asset prices, which can be aggregated across assets\n\n :math:`Y_t = \\\\frac{X_t}{X_{t-obs}} - 1`\n\n where :math:`X_t` is the asset price at time :math:`t`\n\n *Logarithmic*\n\n Natural logarithm of asset price changes, which can be aggregated through time\n\n :math:`Y_t = log(X_t) - log(X_{t-obs})`\n\n where :math:`X_t` is the asset price at time :math:`t`\n\n *Absolute*\n\n Absolute change in asset prices\n\n :math:`Y_t = X_t - X_{t-obs}`\n\n where :math:`X_t` is the asset price at time :math:`t`\n\n **Examples**\n\n Generate price series and take compute returns\n\n >>> prices = generate_series(100)\n >>> returns = returns(prices)\n\n **See also**\n\n :func:`prices`\n \"\"\"\n\n if series.size < 1:\n return series\n\n shifted_series = lag(series, obs, LagMode.TRUNCATE)\n\n if type == Returns.SIMPLE:\n ret_series = series / shifted_series - 1\n elif type == Returns.LOGARITHMIC:\n ret_series = series.apply(math.log) - shifted_series.apply(math.log)\n elif type == Returns.ABSOLUTE:\n ret_series = series - shifted_series\n else:\n raise MqValueError('Unknown returns type (use simple / logarithmic / absolute)')\n\n return ret_series\n\n\n@plot_function\ndef prices(series: pd.Series, initial: int = 1, type: Returns = Returns.SIMPLE) -> pd.Series:\n \"\"\"\n 
Calculate price levels from returns series\n\n :param series: time series of returns\n :param initial: initial price level\n :param type: returns type: simple, logarithmic or absolute\n :return: date-based time series of return\n\n **Usage**\n\n Compute price levels from returns series, based on the value of *type*:\n\n =========== =============================\n Type Description\n =========== =============================\n simple Simple arithmetic returns\n logarithmic Logarithmic returns\n absolute Absolute returns\n =========== =============================\n\n *Simple*\n\n Compute asset price series from simple returns:\n\n :math:`Y_t = (1 + X_{t-1}) Y_{t-1}`\n\n where :math:`X_t` is the asset price at time :math:`t` and :math:`Y_0 = initial`\n\n *Logarithmic*\n\n Compute asset price series from logarithmic returns:\n\n :math:`Y_t = e^{X_{t-1}} Y_{t-1}`\n\n where :math:`X_t` is the asset price at time :math:`t` and :math:`Y_0 = initial`\n\n *Absolute*\n\n Compute asset price series from absolute returns:\n\n :math:`Y_t = X_{t-1} + Y_{t-1}`\n\n where :math:`X_t` is the asset price at time :math:`t` and :math:`Y_0 = initial`\n\n **Examples**\n\n Generate price series and take compute returns\n\n >>> series = generate_series(100)\n >>> returns = prices(returns(series))\n\n **See also**\n\n :func:`returns` :func:`product` :func:`exp`\n \"\"\"\n\n if series.size < 1:\n return series\n\n if type == Returns.SIMPLE:\n return product(1 + series) * initial\n elif type == Returns.LOGARITHMIC:\n return product(series.apply(math.exp)) * initial\n elif type == Returns.ABSOLUTE:\n return sum_(series) + initial\n else:\n raise MqValueError('Unknown returns type (use simple / Logarithmic / absolute)')\n\n\n@plot_function\ndef index(x: pd.Series, initial: int = 1) -> pd.Series:\n \"\"\"\n Geometric series normalization\n\n :param x: time series\n :param initial: initial value\n :return: normalized time series\n\n **Usage**\n\n Divides every value in x by the initial value of x:\n\n :math:`Y_t = initial * X_t / X_0`\n\n where :math:`X_0` is the first value in the series\n\n **Examples**\n\n Normalize series to 1:\n\n >>> series = generate_series(100)\n >>> returns = index(series)\n\n **See also**\n\n :func:`returns`\n\n \"\"\"\n i = x.first_valid_index()\n if not x[i]:\n raise MqValueError('Divide by zero error. Ensure that the first value of series passed to index(...) 
'\n 'is non-zero')\n return pd.Series(dtype=float) if i is None else initial * x / x[i]\n\n\n@plot_function\ndef change(x: pd.Series) -> pd.Series:\n \"\"\"\n Arithmetic series normalization\n\n :param x: time series\n :return: normalized time series\n\n **Usage**\n\n Compute difference of every value from the initial value of x:\n\n :math:`Y_t = X_t - X_0`\n\n where :math:`X_0` is the first value in the series\n\n **Examples**\n\n Change in level from initial value:\n\n >>> series = generate_series(100)\n >>> returns = change(series)\n\n **See also**\n\n :func:`index`\n\n \"\"\"\n return x - x[0]\n\n\ndef _get_annualization_factor(x):\n prev_idx = x.index[0]\n distances = []\n\n for idx, value in x.iloc[1:].iteritems():\n d = (idx - prev_idx).days\n if d == 0:\n raise MqValueError('multiple data points on same date')\n distances.append(d)\n prev_idx = idx\n\n average_distance = numpy.average(distances)\n if average_distance < 2.1:\n factor = AnnualizationFactor.DAILY\n elif 6 <= average_distance < 8:\n factor = AnnualizationFactor.WEEKLY\n elif 14 <= average_distance < 17:\n factor = AnnualizationFactor.SEMI_MONTHLY\n elif 25 <= average_distance < 35:\n factor = AnnualizationFactor.MONTHLY\n elif 85 <= average_distance < 97:\n factor = AnnualizationFactor.QUARTERLY\n elif 360 <= average_distance < 386:\n factor = AnnualizationFactor.ANNUALLY\n else:\n raise MqValueError('Cannot infer annualization factor, average distance: ' + str(average_distance))\n return factor\n\n\n@plot_function\ndef annualize(x: pd.Series) -> pd.Series:\n \"\"\"\n Annualize series based on sample observation frequency\n\n :param x: time series of prices\n :return: date-based time series of annualized values\n\n **Usage**\n\n Based on number of days between observations, will determine an annualization factor and then adjust values\n accordingly. Useful for annualizing daily or monthly returns\n\n :math:`Y_t = X_t * \\sqrt{F}`\n\n Annualization factors as follows, based on period implied by observations:\n\n ========= =============================\n Period Annualization Factor (F)\n ========= =============================\n Daily :math:`252`\n Weekly :math:`52`\n Bi-Weekly :math:`26`\n Monthly :math:`12`\n Quarterly :math:`4`\n Annually :math:`1`\n ========= =============================\n\n **Examples**\n\n Annualize daily returns series:\n\n >>> prices = generate_series(100)\n >>> ann = annualize(returns(prices))\n\n **See also**\n\n :func:`returns`\n \"\"\"\n\n factor: int = _get_annualization_factor(x)\n return x * math.sqrt(factor)\n\n\n@plot_function\ndef volatility(x: pd.Series, w: Union[Window, int, str] = Window(None, 0),\n returns_type: Returns = Returns.SIMPLE) -> pd.Series:\n \"\"\"\n Realized volatility of price series\n\n :param x: time series of prices\n :param w: Window or int: size of window and ramp up to use. e.g. Window(22, 10) where 22 is the window size\n and 10 the ramp up value. If w is a string, it should be a relative date like '1m', '1d', etc.\n Window size defaults to length of series.\n :param returns_type: returns type: simple, logarithmic or absolute\n :return: date-based time series of return\n\n **Usage**\n\n Calculate rolling annualized realized volatility of a price series over a given window. 
Annual volatility of 20% is\n returned as 20.0:\n\n :math:`Y_t = \\\\sqrt{\\\\frac{1}{N-1} \\\\sum_{i=t-w+1}^t (R_t - \\\\overline{R_t})^2} * \\\\sqrt{252} * 100`\n\n where N is the number of observations in each rolling window :math:`w`, :math:`R_t` is the return on time\n :math:`t` based on *returns_type*\n\n =========== =======================================================\n Type Description\n =========== =======================================================\n simple Simple geometric change in asset prices:\n :math:`R_t = \\\\frac{X_t}{X_{t-1}} - 1`\n where :math:`X_t` is the asset price at time :math:`t`\n logarithmic Natural logarithm of asset price changes:\n :math:`R_t = log(X_t) - log(X_{t-1})`\n where :math:`X_t` is the asset price at time :math:`t`\n absolute Absolute change in asset prices:\n :math:`Y_t = X_t - X_{t-obs}`\n where :math:`X_t` is the asset price at time :math:`t`\n =========== =======================================================\n\n and :math:`\\overline{R_t}` is the mean value over the same window:\n\n :math:`\\overline{R_t} = \\\\frac{\\sum_{i=t-w+1}^{t} R_t}{N}`\n\n If window is not provided, computes realized volatility over the full series\n\n **Examples**\n\n Compute rolling :math:`1` month (:math:`22` business day) annualized volatility of price series\n\n >>> series = generate_series(100)\n >>> vol_series = volatility(series, 22)\n >>> vol_series = volatility(series, Window(22, 30))\n\n **See also**\n\n :func:`std` :func:`annualize` :func:`returns`\n\n \"\"\"\n w = normalize_window(x, w)\n\n if x.size < 1:\n return x\n\n return apply_ramp(annualize(std(returns(x, type=returns_type), Window(w.w, 0))).mul(100), w)\n\n\n@plot_function\ndef correlation(x: pd.Series, y: pd.Series,\n w: Union[Window, int, str] = Window(None, 0), type_: SeriesType = SeriesType.PRICES) -> pd.Series:\n \"\"\"\n Rolling correlation of two price series\n\n :param x: price series\n :param y: price series\n :param w: Window, int, or str: size of window and ramp up to use. e.g. Window(22, 10) where 22 is the window size\n and 10 the ramp up value. 
If w is a string, it should be a relative date like '1m', '1d', etc.\n Window size defaults to length of series.\n :param type_: type of both input series: prices or returns\n :return: date-based time series of correlation\n\n **Usage**\n\n Calculate rolling `realized correlation <https://en.wikipedia.org/wiki/Correlation_and_dependence>`_,\n :math:`\\\\rho_t` of two price series over a given window:\n\n :math:`\\\\rho_t = \\\\frac{\\sum_{i=t-w+1}^t (R_t - \\overline{R_t})(Y_t - \\overline{S_t})}{(N-1)\\sigma R_t\\sigma S_t}`\n\n where N is the number of observations in each rolling window, :math:`w`, and :math:`R_t` and :math:`S_t` are the\n simple returns for each series on time :math:`t`\n\n If prices are provided:\n\n :math:`R_t = \\\\frac{X_t}{X_{t-1}} - 1` and :math:`S_t = \\\\frac{Y_t}{Y_{t-1}} - 1`\n\n If returns are provided:\n\n :math:`R_t = X_t` and :math:`S_t = Y_t`\n\n :math:`\\overline{R_t}`, :math:`\\overline{S_t}` are the mean values, and :math:`\\sigma R_{t}` and\n :math:`\\sigma S_{t}` are the sample standard deviations, of series\n :math:`R_t` and :math:`S_t` over the same window\n\n If window is not provided, computes realized correlation over the full series\n\n **Examples**\n\n Compute rolling :math:`1` month (:math:`22` business day) correlation of price series\n\n >>> series1 = generate_series(100)\n >>> series2 = generate_series(100)\n >>> corr = correlation(series1, series2, 22)\n\n **See also**\n\n :func:`std` :func:`returns`\n\n \"\"\"\n w = normalize_window(x, w)\n\n if x.size < 1:\n return x\n\n given_prices = type_ == SeriesType.PRICES\n ret_1 = returns(x) if given_prices else x\n ret_2 = returns(y) if given_prices else y\n\n clean_ret1 = ret_1.dropna()\n clean_ret2 = ret_2.dropna()\n\n if isinstance(w.w, pd.DateOffset):\n values = [clean_ret1.loc[(clean_ret1.index > idx - w.w) & (clean_ret1.index <= idx)].corr(clean_ret2)\n for idx in clean_ret1.index]\n corr = pd.Series(values, index=clean_ret1.index)\n else:\n corr = clean_ret1.rolling(w.w, 0).corr(clean_ret2)\n\n return apply_ramp(interpolate(corr, x, Interpolate.NAN), w)\n\n\n@plot_function\ndef beta(x: pd.Series, b: pd.Series, w: Union[Window, int, str] = Window(None, 0), prices: bool = True) -> pd.Series:\n \"\"\"\n Rolling beta of price series and benchmark\n\n :param x: time series of prices\n :param b: time series of benchmark prices\n :param w: Window, int, or str: size of window and ramp up to use. e.g. Window(22, 10) where 22 is the window size\n and 10 the ramp up value. 
If w is a string, it should be a relative date like '1m', '1d', etc.\n Window size defaults to length of series.\n :param prices: True if input series are prices, False if they are returns\n :return: date-based time series of beta\n\n **Usage**\n\n Calculate rolling `beta <https://en.wikipedia.org/wiki/Beta_(finance)>`_,\n :math:`\\\\beta_t` of a series to a benchmark over a given window:\n\n :math:`R_t = \\\\alpha_t + \\\\beta S_t + \\\\epsilon_t`\n\n Calculated as:\n\n :math:`\\\\beta_t = \\\\frac{\\\\sum_{i=t-w+1}^t Cov(R_t, S_t)}{Var(S_t)}`\n\n where N is the number of observations in each rolling window, :math:`w`, and :math:`R_t` and :math:`S_t` are the\n simple returns for each series on time :math:`t`:\n\n :math:`R_t = \\\\frac{X_t}{X_{t-1}} - 1` and :math:`S_t = \\\\frac{b_t}{b_{t-1}} - 1`\n\n If prices = False, assumes returns are provided:\n\n :math:`R_t = X_t` and :math:`S_t = b_t`\n\n :math:`Cov(R_t, S_t)` and :math:`Var(S_t)` are the covariance and variance of the series\n :math:`R_t` and :math:`S_t` over the same window\n\n If window is not provided, computes beta over the full series\n\n **Examples**\n\n Compute rolling :math:`1` month (:math:`22` business day) beta of two price series\n\n >>> series = generate_series(100)\n >>> benchmark = generate_series(100)\n >>> b = beta(series, benchmark, 22)\n\n **See also**\n\n :func:`var` :func:`cov` :func:`correlation` :func:`returns`\n \"\"\"\n if not isinstance(prices, bool):\n raise MqTypeError('expected a boolean value for \"prices\"')\n\n w = normalize_window(x, w)\n\n ret_series = returns(x) if prices else x\n ret_benchmark = returns(b) if prices else b\n\n if isinstance(w.w, pd.DateOffset):\n series_index = ret_series.index.intersection(ret_benchmark.index)\n size = len(series_index)\n ret_series = ret_series.loc[series_index]\n benchmark_series = ret_benchmark.loc[series_index]\n\n ret_values = np.array(ret_series.values, dtype=np.double)\n benchmark_values = np.array(benchmark_series.values, dtype=np.double)\n\n cov_results = np.empty(size, dtype=np.double)\n var_results = np.empty(size, dtype=np.double)\n\n offset = w.w\n start = 0\n for i in range(1, size):\n min_index_value = series_index[i] - offset\n for idx in range(start, i + 1):\n if series_index[idx] > min_index_value:\n start = idx\n break\n\n sub_benchmark_values = benchmark_values[start:i + 1]\n var_results[i] = np.var(sub_benchmark_values, ddof=1)\n cov_results[i] = np.cov(ret_values[start:i + 1], sub_benchmark_values, ddof=1)[0][1]\n\n result = pd.Series(cov_results / var_results, index=series_index, dtype=np.double)\n else:\n cov = ret_series.rolling(w.w, 0).cov(ret_benchmark)\n result = cov / ret_benchmark.rolling(w.w, 0).var()\n\n # do not compute initial values as they may be extreme when sample size is small\n result[0:3] = np.nan\n return apply_ramp(interpolate(result, x, Interpolate.NAN), w)\n\n\n@plot_function\ndef max_drawdown(x: pd.Series, w: Union[Window, int, str] = Window(None, 0)) -> pd.Series:\n \"\"\"\n Compute the maximum peak to trough drawdown over a rolling window as a ratio.\n\n i.e. if the max drawdown for a period is 20%, this function will return 0.2.\n\n :param x: time series\n :param w: Window, int, or str: size of window and ramp up to use. e.g. Window(22, 10) where 22 is the window size\n and 10 the ramp up value. 
If w is a string, it should be a relative date like '1m', '1d', etc.\n Window size defaults to length of series.\n :return: time series of rolling maximum drawdown\n\n **Examples**\n\n Compute the maximum peak to trough `drawdown <https://en.wikipedia.org/wiki/Drawdown_(economics)>`_\n\n >>> series = generate_series(100)\n >>> max_drawdown(series)\n\n **See also**\n\n :func:`returns`\n\n \"\"\"\n w = normalize_window(x, w)\n if isinstance(w.w, pd.DateOffset):\n scores = pd.Series([x[idx] / x.loc[(x.index > idx - w.w) & (x.index <= idx)].max() - 1 for idx in x.index],\n index=x.index)\n result = pd.Series([scores.loc[(scores.index > idx - w.w) & (scores.index <= idx)].min()\n for idx in scores.index], index=scores.index)\n else:\n rolling_max = x.rolling(w.w, 0).max()\n result = (x / rolling_max - 1).rolling(w.w, 0).min()\n return apply_ramp(result, w)\n"
] | [
[
"pandas.Series",
"numpy.cov",
"numpy.var",
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
SimplisticCode/ODA-ML | [
"2c66bd7b8cb0d98dd9606a8a5439606b832f56bf"
] | [
"NearestCentroid.py"
] | [
"from collections import defaultdict\nimport numpy as np\nfrom numpy import sort\n\n\nclass NearestCentroid():\n def __init__(self):\n self.centroids = None\n\n def fit(self, X, y):\n subrows = defaultdict(list)\n for i in range(len(y)):\n # Collect indices of exemplars for the given class label\n subrows[y[i]].append(i)\n\n centroids = []\n for index, label in enumerate(subrows.keys()):\n exemplars = X[subrows[label]]\n # compute centroid for exemplars\n centroid = self.centroid(exemplars)\n centroids.append({\"centroid\": centroid, \"label\": label})\n self.centroids = centroids\n return self\n\n def centroid(self, X):\n centroid = X.mean(axis=0)\n return centroid\n\n def predict(self, X):\n results = []\n for sample in X:\n distances = []\n for centroid in self.centroids:\n distances.append((np.linalg.norm(sample - centroid[\"centroid\"]), centroid[\"label\"]))\n distances = sorted(distances, key=lambda x: x[0])\n results.append(distances[0][1])\n \n return results\n\n\n\n\n\n"
] | [
[
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
datduong/pytorch-image-models | [
"05c9b52ca65b01e57f8cea2b6447882488aba4f6"
] | [
"validate.py"
] | [
"#!/usr/bin/env python\n\"\"\" ImageNet Validation Script\n\nThis is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained\nmodels or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes\ncanonical PyTorch, standard Python style, and good performance. Repurpose as you see fit.\n\nHacked together by Ross Wightman (https://github.com/rwightman)\n\"\"\"\nimport argparse\nimport os\nimport csv\nimport glob\nimport time\nimport logging\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nfrom collections import OrderedDict\nfrom contextlib import suppress\n\nfrom timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models\nfrom timm.data import Dataset, DatasetTar, create_loader, resolve_data_config, RealLabelsImagenet\nfrom timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_legacy\n\nhas_apex = False\ntry:\n from apex import amp\n has_apex = True\nexcept ImportError:\n pass\n\nfrom timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models\nfrom timm.models.layers.classifier import create_classifier_layerfc\n\nfrom timm.data import Dataset, DatasetTar, create_loader, resolve_data_config, RealLabelsImagenet\nfrom timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging\nhas_native_amp = False\ntry:\n if getattr(torch.cuda.amp, 'autocast') is not None:\n has_native_amp = True\nexcept AttributeError:\n pass\n\ntorch.backends.cudnn.benchmark = True\n_logger = logging.getLogger('validate')\n\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('--model', '-m', metavar='MODEL', default='dpn92',\n help='model architecture (default: dpn92)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 2)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N', help='mini-batch size (default: 256)')\nparser.add_argument('--img-size', default=None, type=int,\n metavar='N', help='Input image dimension, uses model default if empty')\nparser.add_argument('--crop-pct', default=None, type=float,\n metavar='N', help='Input image center crop pct')\nparser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',\n help='Override mean pixel value of dataset')\nparser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',\n help='Override std deviation of of dataset')\nparser.add_argument('--interpolation', default='', type=str, metavar='NAME',\n help='Image resize interpolation type (overrides model)')\nparser.add_argument('--num-classes', type=int, default=1000,\n help='Number classes in dataset')\nparser.add_argument('--class-map', default='', type=str, metavar='FILENAME',\n help='path to class to idx mapping file (default: \"\")')\nparser.add_argument('--gp', default=None, type=str, metavar='POOL',\n help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). 
Model default if None.')\nparser.add_argument('--log-freq', default=10, type=int,\n metavar='N', help='batch logging frequency (default: 10)')\nparser.add_argument('--checkpoint', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--num-gpu', type=int, default=1,\n help='Number of GPUS to use')\nparser.add_argument('--no-test-pool', dest='no_test_pool', action='store_true',\n help='disable test time pool')\nparser.add_argument('--no-prefetcher', action='store_true', default=False,\n help='disable fast prefetcher')\nparser.add_argument('--pin-mem', action='store_true', default=False,\n help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')\nparser.add_argument('--channels-last', action='store_true', default=False,\n help='Use channels_last memory layout')\nparser.add_argument('--amp', action='store_true', default=False,\n help='Use AMP mixed precision. Defaults to Apex, fallback to native Torch AMP.')\nparser.add_argument('--apex-amp', action='store_true', default=False,\n help='Use NVIDIA Apex AMP mixed precision')\nparser.add_argument('--native-amp', action='store_true', default=False,\n help='Use Native Torch AMP mixed precision')\nparser.add_argument('--tf-preprocessing', action='store_true', default=False,\n help='Use Tensorflow preprocessing pipeline (require CPU TF installed')\nparser.add_argument('--use-ema', dest='use_ema', action='store_true',\n help='use ema version of weights if present')\nparser.add_argument('--torchscript', dest='torchscript', action='store_true',\n help='convert model torchscript for inference')\nparser.add_argument('--legacy-jit', dest='legacy_jit', action='store_true',\n help='use legacy jit mode for pytorch 1.5/1.5.1/1.6 to get back fusion performance')\nparser.add_argument('--results-file', default='', type=str, metavar='FILENAME',\n help='Output csv file for validation results (summary)')\nparser.add_argument('--real-labels', default='', type=str, metavar='FILENAME',\n help='Real labels JSON file for imagenet evaluation')\nparser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME',\n help='Valid label indices txt file for validation of partial label space')\n\n# !\nparser.add_argument(\"--create_classifier_layerfc\", action='store_true', default=False, # ! should not call this again ??\n help='add more layers to classification layer')\n\n\n\ndef validate(args):\n # might as well try to validate something\n args.pretrained = args.pretrained or not args.checkpoint\n args.prefetcher = not args.no_prefetcher\n amp_autocast = suppress # do nothing\n if args.amp:\n if has_apex:\n args.apex_amp = True\n elif has_native_amp:\n args.native_amp = True\n else:\n _logger.warning(\"Neither APEX or Native Torch AMP is available, using FP32.\")\n assert not args.apex_amp or not args.native_amp, \"Only one AMP mode should be set.\"\n if args.native_amp:\n amp_autocast = torch.cuda.amp.autocast\n\n if args.legacy_jit:\n set_jit_legacy()\n\n # create model\n model = create_model(\n args.model,\n pretrained=args.pretrained,\n num_classes=args.num_classes,\n in_chans=3,\n global_pool=args.gp,\n scriptable=args.torchscript)\n\n # ! 
add more layer to classifier layer\n if args.create_classifier_layerfc: \n model.global_pool, model.classifier = create_classifier_layerfc(model.num_features, model.num_classes)\n\n if args.checkpoint:\n load_checkpoint(model, args.checkpoint, args.use_ema)\n\n param_count = sum([m.numel() for m in model.parameters()])\n _logger.info('Model %s created, param count: %d' % (args.model, param_count))\n\n data_config = resolve_data_config(vars(args), model=model)\n model, test_time_pool = model, False if args.no_test_pool else apply_test_time_pool(model, data_config)\n\n if args.torchscript:\n torch.jit.optimized_execution(True)\n model = torch.jit.script(model)\n\n model = model.cuda()\n if args.apex_amp:\n model = amp.initialize(model, opt_level='O1')\n\n if args.channels_last:\n model = model.to(memory_format=torch.channels_last)\n\n if args.num_gpu > 1:\n model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))\n\n criterion = nn.CrossEntropyLoss().cuda()\n\n if os.path.splitext(args.data)[1] == '.tar' and os.path.isfile(args.data):\n dataset = DatasetTar(args.data, load_bytes=args.tf_preprocessing, class_map=args.class_map)\n else:\n dataset = Dataset(args.data, load_bytes=args.tf_preprocessing, class_map=args.class_map)\n\n if args.valid_labels:\n with open(args.valid_labels, 'r') as f:\n valid_labels = {int(line.rstrip()) for line in f}\n valid_labels = [i in valid_labels for i in range(args.num_classes)]\n else:\n valid_labels = None\n\n if args.real_labels:\n real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels)\n else:\n real_labels = None\n\n crop_pct = 1.0 if test_time_pool else data_config['crop_pct']\n loader = create_loader(\n dataset,\n input_size=data_config['input_size'],\n batch_size=args.batch_size,\n use_prefetcher=args.prefetcher,\n interpolation=data_config['interpolation'],\n mean=data_config['mean'],\n std=data_config['std'],\n num_workers=args.workers,\n crop_pct=crop_pct,\n pin_memory=args.pin_mem,\n tf_preprocessing=args.tf_preprocessing)\n\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n model.eval()\n with torch.no_grad():\n # warmup, reduce variability of first batch time, especially for comparing torchscript vs non\n input = torch.randn((args.batch_size,) + data_config['input_size']).cuda()\n if args.channels_last:\n input = input.contiguous(memory_format=torch.channels_last)\n model(input)\n end = time.time()\n for batch_idx, (input, target) in enumerate(loader):\n if args.no_prefetcher:\n target = target.cuda()\n input = input.cuda()\n if args.channels_last:\n input = input.contiguous(memory_format=torch.channels_last)\n\n # compute output\n with amp_autocast():\n output = model(input)\n\n if valid_labels is not None:\n output = output[:, valid_labels]\n loss = criterion(output, target)\n\n if real_labels is not None:\n real_labels.add_result(output)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1.item(), input.size(0))\n top5.update(acc5.item(), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if batch_idx % args.log_freq == 0:\n _logger.info(\n 'Test: [{0:>4d}/{1}] '\n 'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '\n 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '\n 'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '\n 'Acc@5: {top5.val:>7.3f} 
({top5.avg:>7.3f})'.format(\n batch_idx, len(loader), batch_time=batch_time,\n rate_avg=input.size(0) / batch_time.avg,\n loss=losses, top1=top1, top5=top5))\n\n if real_labels is not None:\n # real labels mode replaces topk values at the end\n top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5)\n else:\n top1a, top5a = top1.avg, top5.avg\n results = OrderedDict(\n top1=round(top1a, 4), top1_err=round(100 - top1a, 4),\n top5=round(top5a, 4), top5_err=round(100 - top5a, 4),\n param_count=round(param_count / 1e6, 2),\n img_size=data_config['input_size'][-1],\n cropt_pct=crop_pct,\n interpolation=data_config['interpolation'])\n\n _logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})'.format(\n results['top1'], results['top1_err'], results['top5'], results['top5_err']))\n\n return results\n\n\ndef main():\n setup_default_logging()\n args = parser.parse_args()\n model_cfgs = []\n model_names = []\n if os.path.isdir(args.checkpoint):\n # validate all checkpoints in a path with same model\n checkpoints = glob.glob(args.checkpoint + '/*.pth.tar')\n checkpoints += glob.glob(args.checkpoint + '/*.pth')\n model_names = list_models(args.model)\n model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]\n else:\n if args.model == 'all':\n # validate all models in a list of names with pretrained checkpoints\n args.pretrained = True\n model_names = list_models(pretrained=True)\n model_cfgs = [(n, '') for n in model_names]\n elif not is_model(args.model):\n # model name doesn't exist, try as wildcard filter\n model_names = list_models(args.model)\n model_cfgs = [(n, '') for n in model_names]\n\n if len(model_cfgs):\n results_file = args.results_file or './results-all.csv'\n _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))\n results = []\n try:\n start_batch_size = args.batch_size\n for m, c in model_cfgs:\n batch_size = start_batch_size\n args.model = m\n args.checkpoint = c\n result = OrderedDict(model=args.model)\n r = {}\n while not r and batch_size >= args.num_gpu:\n torch.cuda.empty_cache()\n try:\n args.batch_size = batch_size\n print('Validating with batch size: %d' % args.batch_size)\n r = validate(args)\n except RuntimeError as e:\n if batch_size <= args.num_gpu:\n print(\"Validation failed with no ability to reduce batch size. Exiting.\")\n raise e\n batch_size = max(batch_size // 2, args.num_gpu)\n print(\"Validation failed, reducing batch size by 50%\")\n result.update(r)\n if args.checkpoint:\n result['checkpoint'] = args.checkpoint\n results.append(result)\n except KeyboardInterrupt as e:\n pass\n results = sorted(results, key=lambda x: x['top1'], reverse=True)\n if len(results):\n write_results(results_file, results)\n else:\n validate(args)\n\n\ndef write_results(results_file, results):\n with open(results_file, mode='w') as cf:\n dw = csv.DictWriter(cf, fieldnames=results[0].keys())\n dw.writeheader()\n for r in results:\n dw.writerow(r)\n cf.flush()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.jit.script",
"torch.nn.CrossEntropyLoss",
"torch.jit.optimized_execution",
"torch.randn",
"torch.cuda.empty_cache",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yx9527/insightface | [
"4eae1d4e0d4232789df1968d099cd6219752a4a3"
] | [
"detection/scrfd/mmdet/datasets/retinaface.py"
] | [
"import itertools\nimport logging\nimport os.path as osp\nimport tempfile\nfrom collections import OrderedDict\n\nimport mmcv\nimport numpy as np\nfrom mmcv.utils import print_log\nfrom terminaltables import AsciiTable\n\nfrom mmdet.core import eval_recalls\nfrom .builder import DATASETS\nfrom .custom import CustomDataset\n\ntry:\n import mmpycocotools\n if not hasattr(mmpycocotools, '__sphinx_mock__'): # for doc generation\n assert mmpycocotools.__version__ >= '12.0.2'\nexcept AssertionError:\n raise AssertionError('Incompatible version of pycocotools is installed. '\n 'Run pip uninstall pycocotools first. Then run pip '\n 'install mmpycocotools to install open-mmlab forked '\n 'pycocotools.')\n\[email protected]_module()\nclass RetinaFaceDataset(CustomDataset):\n\n CLASSES = ('FG', )\n def __init__(self, min_size=None, **kwargs):\n self.NK = 5\n self.cat2label = {cat: i for i, cat in enumerate(self.CLASSES)}\n self.min_size = min_size\n self.gt_path = kwargs.get('gt_path')\n super(RetinaFaceDataset, self).__init__(**kwargs)\n #print(self.cat2label)\n\n def _parse_ann_line(self, line):\n values = [float(x) for x in line.strip().split()]\n bbox = np.array(values[0:4], dtype=np.float32 )\n kps = np.zeros( (self.NK,3), dtype=np.float32 )\n ignore = False\n if self.min_size is not None:\n assert not self.test_mode\n w = bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n if w < self.min_size or h < self.min_size:\n ignore = True\n if len(values)>4:\n if len(values)>5:\n #print(values)\n kps = np.array( values[4:19], dtype=np.float32 ).reshape((self.NK,3))\n for li in range(kps.shape[0]):\n if (kps[li,:]==-1).all():\n #assert kps[li][2]==-1\n kps[li][2] = 0.0 #weight = 0, ignore\n else:\n assert kps[li][2]>=0\n kps[li][2] = 1.0 #weight\n #if li==0:\n # landmark_num+=1\n #if kps[li][2]==0.0:#visible\n # kps[li][2] = 1.0\n #else:\n # kps[li][2] = 0.0\n else: #len(values)==5\n if not ignore:\n ignore = (values[4]==1)\n else:\n assert self.test_mode\n\n return dict(bbox=bbox, kps=kps, ignore=ignore, cat='FG')\n\n\n def load_annotations(self, ann_file):\n \"\"\"Load annotation from COCO style annotation file.\n\n Args:\n ann_file (str): Path of annotation file.\n\n Returns:\n list[dict]: Annotation info from COCO api.\n \"\"\"\n name = None\n bbox_map = {}\n for line in open(ann_file, 'r'):\n line = line.strip()\n if line.startswith('#'):\n value = line[1:].strip().split()\n name = value[0]\n width = int(value[1])\n height = int(value[2])\n\n bbox_map[name] = dict(width=width, height=height, objs=[])\n continue\n assert name is not None\n assert name in bbox_map\n bbox_map[name]['objs'].append(line)\n print('origin image size', len(bbox_map))\n data_infos = []\n for name in bbox_map:\n item = bbox_map[name]\n width = item['width']\n height = item['height']\n vals = item['objs']\n objs = []\n for line in vals:\n data = self._parse_ann_line(line)\n if data is None:\n continue\n objs.append( data ) #data is (bbox, kps, cat)\n if len(objs)==0 and not self.test_mode:\n continue\n data_infos.append(dict(filename=name, width = width, height=height, objs = objs))\n return data_infos\n\n\n def get_ann_info(self, idx):\n \"\"\"Get COCO annotation by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Annotation info of specified index.\n \"\"\"\n data_info = self.data_infos[idx]\n\n bboxes = []\n keypointss = []\n labels = []\n bboxes_ignore = []\n labels_ignore = []\n for obj in data_info['objs']:\n label = self.cat2label[obj['cat']]\n bbox = obj['bbox']\n keypoints = obj['kps']\n ignore = 
obj['ignore']\n if ignore:\n bboxes_ignore.append(bbox)\n labels_ignore.append(label)\n else:\n bboxes.append(bbox)\n labels.append(label)\n keypointss.append(keypoints)\n if not bboxes:\n bboxes = np.zeros((0, 4))\n labels = np.zeros((0, ))\n keypointss = np.zeros((0, self.NK, 3))\n else:\n #bboxes = np.array(bboxes, ndmin=2) - 1\n bboxes = np.array(bboxes, ndmin=2)\n labels = np.array(labels)\n keypointss = np.array(keypointss, ndmin=3)\n if not bboxes_ignore:\n bboxes_ignore = np.zeros((0, 4))\n labels_ignore = np.zeros((0, ))\n else:\n #bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1\n bboxes_ignore = np.array(bboxes_ignore, ndmin=2)\n labels_ignore = np.array(labels_ignore)\n ann = dict(\n bboxes=bboxes.astype(np.float32),\n labels=labels.astype(np.int64),\n keypointss = keypointss.astype(np.float32),\n bboxes_ignore=bboxes_ignore.astype(np.float32),\n labels_ignore=labels_ignore.astype(np.int64))\n return ann\n\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Octave-byte/pAPY | [
"cb077aeb49228262daa4c53da9dc88d0ab89d2a2"
] | [
"script_supabase_stat_apy.py"
] | [
"# -*- coding: utf-8 -*-\r\n\r\n# Import the os module\r\nimport os\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom datetime import date\r\nimport requests\r\nfrom pandas import json_normalize \r\nimport uuid\r\n\r\n###############\r\n## Data prep\r\n###############\r\n\r\nr = requests.get('https://iunmfujgowtioifurpcp.supabase.co/rest/v1/historical_price?select=*', headers={'Authorization': 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoic2VydmljZV9yb2xlIiwiaWF0IjoxNjQzMzEyNjE1LCJleHAiOjE5NTg4ODg2MTV9.SZ9vqTxuLt9-kEPXvVzg5RTyzL-3VFn5QvIZirpBgO8', 'apikey': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoic2VydmljZV9yb2xlIiwiaWF0IjoxNjQzMzEyNjE1LCJleHAiOjE5NTg4ODg2MTV9.SZ9vqTxuLt9-kEPXvVzg5RTyzL-3VFn5QvIZirpBgO8'})\r\nr2 = requests.get('https://iunmfujgowtioifurpcp.supabase.co/rest/v1/historical_tvl?select=*', headers={'Authorization': 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoic2VydmljZV9yb2xlIiwiaWF0IjoxNjQzMzEyNjE1LCJleHAiOjE5NTg4ODg2MTV9.SZ9vqTxuLt9-kEPXvVzg5RTyzL-3VFn5QvIZirpBgO8', 'apikey': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoic2VydmljZV9yb2xlIiwiaWF0IjoxNjQzMzEyNjE1LCJleHAiOjE5NTg4ODg2MTV9.SZ9vqTxuLt9-kEPXvVzg5RTyzL-3VFn5QvIZirpBgO8'})\r\n\r\npriceYearn = r.json()\r\ntvlYearn = r2.json()\r\nprice = json_normalize(priceYearn)\r\ntvl = json_normalize(tvlYearn)\r\n\r\n###################\r\n#APY table\r\n###################\r\n\r\n\r\ncombined = pd.merge( price, tvl,on=['protocol_id', 'blocktime', 'address', 'timestamp'])\r\n\r\n\r\n\r\ncombined['dailyAPY'] = (combined.groupby('address')['price']\r\n .apply(pd.Series.pct_change)*365)\r\n\r\ncombined['weeklyAPY'] = (combined.groupby('address')['price']\r\n .apply(lambda dfi : dfi.pct_change(periods=7))*365/7)\r\n\r\ncombined['monthlyAPY'] = (combined.groupby('address')['price']\r\n .apply(lambda dfi : dfi.pct_change(periods=30))*365/30)\r\n\r\n\r\ncombined['daily'] = (combined.groupby('address')['price']\r\n .apply(pd.Series.pct_change))\r\n\r\ncombined['count'] = combined.groupby(\"address\").apply(lambda x: np.arange(1, len(x)+1))[0]\r\n\r\n\r\ncombined['inceptionAPY'] = ((1 + combined.daily).cumprod() - 1)*365/(combined['count'])\r\n\r\ncombined['id'] = combined.apply(lambda _: uuid.uuid4(), axis=1)\r\n\r\n\r\nhistorical_apy = combined.drop(['id_x', 'id_y', 'price', 'blocktime', 'daily', 'count'], 1)\r\n\r\n\r\n\r\n###################\r\n#Statistics table\r\n###################\r\n\r\n\r\n\r\n\r\nfinal = combined.drop_duplicates('address')[['address', 'protocol_id']]\r\n\r\n\r\n\r\ncorr_weekly = combined.dropna(subset=['weeklyAPY']).groupby('address')[['weeklyAPY', 'tvl']].corr().unstack().iloc[:,1].to_frame(name = \"corr_w\")\r\ncorr_monthly = combined.dropna(subset = ['monthlyAPY']).groupby('address')[['monthlyAPY','tvl']].corr().unstack().iloc[:,1].to_frame(name = \"corr_m\")\r\nvol_weekly = combined.dropna(subset=['weeklyAPY']).groupby('address')[[\"weeklyAPY\"]].std()\r\nvol_monthly = combined.dropna(subset = ['monthlyAPY']).groupby('address')[[\"monthlyAPY\"]].std()\r\n\r\n\r\n\r\nfinal = pd.merge(corr_weekly, final, on = ['address'])\r\nfinal = pd.merge(corr_monthly, final, on = ['address'])\r\nfinal = pd.merge(vol_weekly, final, on = ['address'])\r\nfinal = pd.merge(vol_monthly, final, on = ['address'])\r\nfinal[['timestamp']] = date.today()\r\nfinal[['id']] =final.apply(lambda _: uuid.uuid4(), axis=1)\r\n\r\nhistorical_stats = final.rename(columns={\"monthlyAPY\": \"vol_m\", \"weeklyAPY\": \"vol_w\"})\r\n\r\n\r\n\r\n\r\n"
] | [
[
"pandas.merge",
"pandas.json_normalize"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0"
],
"scipy": [],
"tensorflow": []
}
] |
jonathanjameswatson/kivygames | [
"7636580956562af0814c973f94afede926cfa4b9"
] | [
"kivygames/games/noughtsandcrosses/__init__.py"
] | [
"import numpy as np\n\nfrom kivygames.games import Game\n\nimport kivygames.games.noughtsandcrosses.c as c\n\n\nclass CellOccupiedError(Exception):\n pass\n\n\nclass NoughtsAndCrosses(Game):\n minPlayers = 2\n maxPlayers = 2\n hasAI = True\n\n gridShape = (3, 3)\n\n def __init__(self):\n Game.__init__(self)\n\n self.grid = np.zeros(self.gridShape, dtype=\"u1\")\n self.player = 1\n\n def isEmpty(self, position):\n return self.grid[position] == 0\n\n async def turn(self):\n await self.sendOutput(\"Player\", self.player)\n while True:\n position = await self.getInput(\"Position\", tuple, self.player)\n if self.isEmpty(position):\n break\n await self.sendOutput(\"Error\", \"That space is already full.\")\n\n await self.sendOutput(\"Error\", \"\")\n self.grid[position] = self.player\n await self.sendOutput(\"Grid\", self.grid)\n if c.hasPlayerWon(self.grid, self.player):\n await self.sendOutput(\"End\", f\"Player {self.player} wins.\")\n return True\n if np.count_nonzero(self.grid) == 9:\n await self.sendOutput(\"End\", f\"It's a draw!\")\n return True\n self.player = 3 - self.player\n\n return False\n\n def getAIInput(self, name):\n if name == \"Position\":\n return c.minimax(self.player, self.player, True, self.grid)[1]\n\n async def game(self):\n while True:\n ended = await self.turn()\n if ended:\n break\n await self.end()\n"
] | [
[
"numpy.zeros",
"numpy.count_nonzero"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
buzmakov/tography_scripts | [
"1551cbe033bce61cb8d52c8a855b3071c67367a8"
] | [
"misc/epif_stones.py"
] | [
"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.2'\n# jupytext_version: 1.2.4\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %%\n# %matplotlib inline\n\n# %%\nimport os\nimport numpy as np\nimport h5py\nimport pylab as plt\nfrom scipy import ndimage as ndi\nimport pickle\n\nfrom scipy.ndimage.morphology import distance_transform_edt\n\nfrom skimage.feature import peak_local_max\nfrom skimage.measure import regionprops\nfrom skimage.segmentation import watershed\nfrom skimage.measure import regionprops\n\n# %%\ndata_files = [\n '/home/krivonosov/reconstruction/e89e7874-3178-4807-8678-df4e9695f4ae/e89e7874-3178-4807-8678-df4e9695f4ae.h5',\n '/home/krivonosov/reconstruction/bc9b34a9-144d-4c2e-bd0d-3ba2f6358eff/bc9b34a9-144d-4c2e-bd0d-3ba2f6358eff.h5']\n\n# %%\nfile_numb = 1\ndf = data_files[file_numb]\n\n# %%\ndata = h5py.File(df, 'r')['Reconstruction'].value\ndata.shape\n\n\n# %%\ndef test_segmentation(image, mask=None):\n plt.figure(figsize=(13,13))\n plt.imshow(image)\n plt.colorbar()\n if not mask is None:\n plt.contour(mask, levels=range(np.max(mask)), colors=['r',])\n plt.show()\n\ntest_segmentation(data[350], data[350]>0.9)\n\n# %%\nmask = data>0.9\n\n# %%\ndata_dtf = distance_transform_edt(mask)\n\n# %%\ntest_segmentation(data_dtf[350])\n\n# %%\nlocal_maxi = peak_local_max(data_dtf, indices=False, \n threshold_abs=2, min_distance=20,# footprint=np.ones((3, 3, 3)),\n labels=mask)\nmarkers, num_features = ndi.label(local_maxi)#, np.ones((3, 3, 3)))\nlabels = watershed(-data_dtf, markers, mask=mask)\n\n# %%\nprint(num_features)\n\n# %%\ntest_segmentation(labels[:,400], mask[:,400])\ntest_segmentation(data[:,400], labels[:,400])\n\ntest_segmentation(labels[400], mask[400])\ntest_segmentation(data[400], labels[400])\n\n# %%\nregions = regionprops(labels)\nprint(len(regions))\n\n# %%\nareas = [r.area for r in regions]\n\n# %%\nplt.figure(figsize=(10,10))\nplt.hist(areas, bins=1000)\nplt.xlim(0,np.percentile(areas, 90))\nplt.grid()\nplt.show()\n\n# %%\nmarkers_m, num_features_m = ndi.label(mask, np.ones((3,3,3)))\n\n# %%\nnum_features_m\n\n# %%\nplt.figure(figsize=(13,13))\nplt.imshow(markers_m[500], vmin=0, vmax=markers_m.max())\nplt.show()\n\nplt.figure(figsize=(13,13))\nplt.imshow(markers_m[:, 500], vmin=0, vmax=markers_m.max())\nplt.show()\n\ntest_segmentation(markers_m[:,400], mask[:,400])\ntest_segmentation(data[:,400], markers_m[:,400])\n\ntest_segmentation(markers_m[400], mask[400])\ntest_segmentation(data[400], markers_m[400])\n\n# %%\nregions_m = regionprops(markers_m)\nprint(len(regions_m))\n\nwith open(f'e_{file_numb}.pkl','bw') as pf:\n pickle.dump(regions_m, pf)\n\n# %%\nareas_m = [r.area for r in regions_m]\nx,y = np.histogram(areas_m, bins=10000)\nplt.figure(figsize=(10,10))\nplt.semilogy(y[:-1],x,'o')\nplt.xlim(0,np.percentile(areas_m, 99))\nplt.grid()\nplt.show()\n\n\n# %%\ndef reshape_volume(volume, reshape):\n res = np.zeros([s//reshape for s in volume.shape], dtype='float32')\n xs,ys,zs = [s*reshape for s in res.shape]\n for x,y,z in np.ndindex(reshape, reshape, reshape):\n res += volume[x:xs:reshape, y:ys:reshape, z:zs:reshape]\n return res/reshape**3\n\ndef save_amira(in_array, out_path, reshape=3):\n data_path = out_path\n with open(os.path.join(data_path, 'amira.raw'), 'wb') as amira_file:\n reshaped_vol = reshape_volume(in_array, reshape)\n reshaped_vol.tofile(amira_file)\n file_shape = reshaped_vol.shape\n with 
open(os.path.join(data_path, 'tomo.hx'), 'w') as af:\n af.write('# Amira Script\\n')\n af.write('remove -all\\n')\n af.write(r'[ load -raw ${SCRIPTDIR}/amira.raw little xfastest float 1 '+\n str(file_shape[2])+' '+str(file_shape[1])+' '+str(file_shape[0])+\n ' 0 '+str(file_shape[2]-1)+' 0 '+str(file_shape[1]-1)+' 0 '+str(file_shape[0]-1)+\n ' ] setLabel tomo.raw\\n')\n\n\n# %%\n# save_amira(markers_m, '.', 1)\n\n# %%\nregs = []\nfor f_numb_x in [0,1]:\n with open(f'e_{f_numb_x}.pkl','rb') as pf:\n print(f_numb_x)\n regs.append(pickle.load(pf))\n\n# %%\nfor i in [0,1]:\n area = [np.power(3./4*r.area,1/3.) for r in regs[i] if r.area>3]\n pos = [np.asarray([(r.bbox[0]+r.bbox[3])/2, (r.bbox[1]+r.bbox[4])/2, (r.bbox[2]+r.bbox[5])/2]) \n for r in regs[i] if r.area>3]\n dists = []\n for p0 in pos:\n dists.append(np.min([np.linalg.norm(p0-p1) for p1 in pos if not np.linalg.norm(p0-p1)==0]))\n# area = np.sort(area)[:-10]\n print(f'sample#{i}:\\n' +\n f'\\t number of inclusions: {len(area)}\\n' +\n f'\\t radius:mean:{np.mean(area):.4} std:{np.std(area):.4}' +\n f'25%:{np.percentile(area, 25):.4} median:{np.median(area):.4} 75%:{np.percentile(area, 75):.4}\\n'+\n f'\\t distance :mean:{np.mean(dists):.4} std:{np.std(dists):.4}'\n )\n\n\n\n# %%\nfor i in [0,1]:\n area = [np.power(3./4*r.area,1/3.) for r in regs[i] if r.area>3]\n print(f'sample#{i}:\\n' +\n f'\\t number of inclusions: {len(area)}\\n'+\n f'\\t number of big inclusions: {np.sum(np.asarray(area)>10)}\\n'\n )\n\n# %%\nplt.hist(area)\n\n# %%\n"
] | [
[
"numpy.power",
"numpy.asarray",
"numpy.median",
"numpy.linalg.norm",
"numpy.percentile",
"numpy.ones",
"scipy.ndimage.label",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.ndindex",
"numpy.histogram",
"numpy.zeros",
"scipy.ndimage.morphology.distance_transform_edt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.10",
"1.3",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
paulolimac/Copulas | [
"26d700b5cf203d1e71e50bb86b9c3dd6f25dfe1a"
] | [
"copulas/bivariate/clayton.py"
] | [
"import numpy as np\n\nfrom copulas.bivariate.base import Bivariate, CopulaTypes\n\n\nclass Clayton(Bivariate):\n \"\"\"Class for clayton copula model.\"\"\"\n\n copula_type = CopulaTypes.CLAYTON\n theta_interval = [-1, float('inf')]\n invalid_thetas = [0]\n\n def generator(self, t):\n \"\"\"Return the generator function.\"\"\"\n self.check_fit()\n\n return 1.0 / self.theta * (np.power(t, -self.theta) - 1)\n\n def probability_density(self, X):\n \"\"\"Compute probability density function for given copula family.\n\n Args:\n X: `np.ndarray`\n\n Returns:\n np.array: Probability density for the input values.\n \"\"\"\n self.check_fit()\n\n U, V = self.split_matrix(X)\n\n a = (self.theta + 1) * np.power(np.multiply(U, V), -(self.theta + 1))\n b = np.power(U, -self.theta) + np.power(V, -self.theta) - 1\n c = -(2 * self.theta + 1) / self.theta\n return a * np.power(b, c)\n\n def cumulative_distribution(self, X):\n \"\"\"Computes the cumulative distribution function for the copula, :math:`C(u, v)`\n\n Args:\n X: `np.ndarray`\n\n Returns:\n np.array: cumulative probability\n \"\"\"\n self.check_fit()\n\n U, V = self.split_matrix(X)\n\n if (V == 0).all() or (U == 0).all():\n return np.zeros(V.shape[0])\n\n else:\n cdfs = [\n np.power(\n np.power(U[i], -self.theta) + np.power(V[i], -self.theta) - 1,\n -1.0 / self.theta\n )\n if U[i] > 0 else 0\n for i in range(len(U))\n ]\n\n return np.array([max(x, 0) for x in cdfs])\n\n def percent_point(self, y, V):\n \"\"\"Compute the inverse of conditional cumulative distribution :math:`C(u|v)^-1`\n\n Args:\n y: `np.ndarray` value of :math:`C(u|v)`.\n v: `np.ndarray` given value of v.\n \"\"\"\n self.check_fit()\n\n if self.theta < 0:\n return V\n\n else:\n a = np.power(y, self.theta / (-1 - self.theta))\n b = np.power(V, self.theta)\n u = np.power((a + b - 1) / b, -1 / self.theta)\n return u\n\n def partial_derivative(self, X, y=0):\n \"\"\"Compute partial derivative :math:`C(u|v)` of cumulative distribution.\n\n Args:\n X: `np.ndarray`\n y: `float`\n\n Returns:\n np.ndarray: Derivatives\n \"\"\"\n self.check_fit()\n\n U, V = self.split_matrix(X)\n\n if self.theta == 0:\n return V\n\n else:\n A = np.power(V, -self.theta - 1)\n B = np.power(V, -self.theta) + np.power(U, -self.theta) - 1\n h = np.power(B, (-1 - self.theta) / self.theta)\n return np.multiply(A, h) - y\n\n def compute_theta(self):\n \"\"\"Compute theta parameter using Kendall's tau.\n\n On Clayton copula this is :math:`τ = θ/(θ + 2) \\\\implies θ = 2τ/(1-τ)` with\n :math:`θ ∈ (0, ∞)`.\n\n On the corner case of :math:`τ = 1`, a big enough number is returned instead of infinity.\n \"\"\"\n if self.tau == 1:\n theta = 10000\n\n else:\n theta = 2 * self.tau / (1 - self.tau)\n\n return theta\n"
] | [
[
"numpy.zeros",
"numpy.multiply",
"numpy.power"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Tevien/bryolo | [
"05d15ff0eab936e085f7339616e2874032c6c935"
] | [
"utils/datasets.py"
] | [
"# YOLOv5 dataset utils and dataloaders\n\nimport glob\nimport hashlib\nimport json\nimport logging\nimport os\nimport random\nimport shutil\nimport time\nfrom itertools import repeat\nfrom multiprocessing.pool import ThreadPool, Pool\nfrom pathlib import Path\nfrom threading import Thread\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport yaml\nfrom PIL import Image, ExifTags\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\n\nfrom utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective\nfrom utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \\\n xyn2xy, segments2boxes, clean_str\nfrom utils.torch_utils import torch_distributed_zero_first\n\n# Parameters\nHELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'\nIMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes\nVID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes\nNUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads\n\n# Get orientation exif tag\nfor orientation in ExifTags.TAGS.keys():\n if ExifTags.TAGS[orientation] == 'Orientation':\n break\n\n\ndef get_hash(paths):\n # Returns a single hash value of a list of paths (files or dirs)\n size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes\n h = hashlib.md5(str(size).encode()) # hash sizes\n h.update(''.join(paths).encode()) # hash paths\n return h.hexdigest() # return hash\n\n\ndef exif_size(img):\n # Returns exif-corrected PIL size\n s = img.size # (width, height)\n try:\n rotation = dict(img._getexif().items())[orientation]\n if rotation == 6: # rotation 270\n s = (s[1], s[0])\n elif rotation == 8: # rotation 90\n s = (s[1], s[0])\n except:\n pass\n\n return s\n\n\ndef exif_transpose(image):\n \"\"\"\n Transpose a PIL image accordingly if it has an EXIF Orientation tag.\n From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py\n\n :param image: The image to transpose.\n :return: An image.\n \"\"\"\n exif = image.getexif()\n orientation = exif.get(0x0112, 1) # default 1\n if orientation > 1:\n method = {2: Image.FLIP_LEFT_RIGHT,\n 3: Image.ROTATE_180,\n 4: Image.FLIP_TOP_BOTTOM,\n 5: Image.TRANSPOSE,\n 6: Image.ROTATE_270,\n 7: Image.TRANSVERSE,\n 8: Image.ROTATE_90,\n }.get(orientation)\n if method is not None:\n image = image.transpose(method)\n del exif[0x0112]\n image.info[\"exif\"] = exif.tobytes()\n return image\n\n\ndef create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,\n rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''):\n # Make sure only the first process in DDP process the dataset first, and the following others can use the cache\n with torch_distributed_zero_first(rank):\n dataset = LoadImagesAndLabels(path, imgsz, batch_size,\n augment=augment, # augment images\n hyp=hyp, # augmentation hyperparameters\n rect=rect, # rectangular training\n cache_images=cache,\n single_cls=single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix)\n\n batch_size = min(batch_size, len(dataset))\n nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None\n loader = torch.utils.data.DataLoader if image_weights else 
InfiniteDataLoader\n # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()\n dataloader = loader(dataset,\n batch_size=batch_size,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)\n return dataloader, dataset\n\n\nclass InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):\n \"\"\" Dataloader that reuses workers\n\n Uses same syntax as vanilla DataLoader\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))\n self.iterator = super().__iter__()\n\n def __len__(self):\n return len(self.batch_sampler.sampler)\n\n def __iter__(self):\n for i in range(len(self)):\n yield next(self.iterator)\n\n\nclass _RepeatSampler(object):\n \"\"\" Sampler that repeats forever\n\n Args:\n sampler (Sampler)\n \"\"\"\n\n def __init__(self, sampler):\n self.sampler = sampler\n\n def __iter__(self):\n while True:\n yield from iter(self.sampler)\n\n\nclass LoadImages: # for inference\n def __init__(self, path, img_size=640, stride=32):\n p = str(Path(path).absolute()) # os-agnostic absolute path\n if '*' in p:\n files = sorted(glob.glob(p, recursive=True)) # glob\n elif os.path.isdir(p):\n files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir\n elif os.path.isfile(p):\n files = [p] # files\n else:\n raise Exception(f'ERROR: {p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n if any(videos):\n self.new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n ret_val, img0 = self.cap.read()\n if not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n else:\n path = self.files[self.count]\n self.new_video(path)\n ret_val, img0 = self.cap.read()\n\n self.frame += 1\n print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')\n\n else:\n # Read image\n self.count += 1\n img0 = cv2.imread(path) # BGR\n assert img0 is not None, 'Image Not Found ' + path\n print(f'image {self.count}/{self.nf} {path}: ', end='')\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride)[0]\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return path, img, img0, self.cap\n\n def new_video(self, path):\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n def __len__(self):\n return self.nf # number of files\n\n\nclass LoadWebcam: # for inference\n def __init__(self, pipe='0', img_size=640, stride=32):\n self.img_size = img_size\n self.stride = stride\n self.pipe = eval(pipe) if pipe.isnumeric() else pipe\n self.cap = cv2.VideoCapture(self.pipe) # video capture object\n self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if cv2.waitKey(1) == ord('q'): # q to quit\n self.cap.release()\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Read frame\n ret_val, img0 = self.cap.read()\n img0 = cv2.flip(img0, 1) # flip left-right\n\n # Print\n assert ret_val, f'Camera Error {self.pipe}'\n img_path = 'webcam.jpg'\n print(f'webcam {self.count}: ', end='')\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride)[0]\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return img_path, img, img0, None\n\n def __len__(self):\n return 0\n\n\nclass LoadStreams: # multiple IP or RTSP cameras\n def __init__(self, sources='streams.txt', img_size=640, stride=32):\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n\n if os.path.isfile(sources):\n with open(sources, 'r') as f:\n sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]\n else:\n sources = [sources]\n\n n = len(sources)\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n print(f'{i + 1}/{n}: {s}... ', end='')\n if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video\n check_requirements(('pafy', 'youtube_dl'))\n import pafy\n s = pafy.new(s).getbest(preftype=\"mp4\").url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. 
s = '0' local webcam\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap]), daemon=True)\n print(f\" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n self.threads[i].start()\n print('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n if not self.rect:\n print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap):\n # Read stream `i` frames in daemon thread\n n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame\n while cap.isOpened() and n < f:\n n += 1\n # _, self.imgs[index] = cap.read()\n cap.grab()\n if n % read == 0:\n success, im = cap.retrieve()\n self.imgs[i] = im if success else self.imgs[i] * 0\n time.sleep(1 / self.fps[i]) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Letterbox\n img0 = self.imgs.copy()\n img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]\n\n # Stack\n img = np.stack(img, 0)\n\n # Convert\n img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n img = np.ascontiguousarray(img)\n\n return self.sources, img, img0, None\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years\n\n\ndef img2label_paths(img_paths):\n # Define label paths as a function of image paths\n sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings\n return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]\n\n\nclass LoadImagesAndLabels(Dataset): # for training/testing\n def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,\n cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):\n self.img_size = img_size\n self.augment = augment\n self.hyp = hyp\n self.image_weights = image_weights\n self.rect = False if image_weights else rect\n self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)\n self.mosaic_border = [-img_size // 2, -img_size // 2]\n self.stride = stride\n self.path = path\n self.albumentations = Albumentations() if augment else None\n\n try:\n f = [] # image files\n for p in path if isinstance(path, list) else [path]:\n p = Path(p) # os-agnostic\n if p.is_dir(): # dir\n f += glob.glob(str(p / '**' / '*.*'), recursive=True)\n # f = list(p.rglob('**/*.*')) # pathlib\n elif p.is_file(): # file\n with open(p, 'r') as t:\n t = t.read().strip().splitlines()\n parent = str(p.parent) + os.sep\n f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path\n # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)\n 
else:\n raise Exception(f'{prefix}{p} does not exist')\n self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])\n # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib\n assert self.img_files, f'{prefix}No images found'\n except Exception as e:\n raise Exception(f'{prefix}Error loading data from {path}: {e}\\nSee {HELP_URL}')\n\n # Check cache\n self.label_files = img2label_paths(self.img_files) # labels\n cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')\n try:\n cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict\n assert cache['version'] == 0.4 and cache['hash'] == get_hash(self.label_files + self.img_files)\n except:\n cache, exists = self.cache_labels(cache_path, prefix), False # cache\n\n # Display cache\n nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total\n if exists:\n d = f\"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted\"\n tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results\n if cache['msgs']:\n logging.info('\\n'.join(cache['msgs'])) # display warnings\n assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'\n\n # Read cache\n [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items\n labels, shapes, self.segments = zip(*cache.values())\n self.labels = list(labels)\n self.shapes = np.array(shapes, dtype=np.float64)\n self.img_files = list(cache.keys()) # update\n self.label_files = img2label_paths(cache.keys()) # update\n if single_cls:\n for x in self.labels:\n x[:, 0] = 0\n\n n = len(shapes) # number of images\n bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index\n nb = bi[-1] + 1 # number of batches\n self.batch = bi # batch index of image\n self.n = n\n self.indices = range(n)\n\n # Rectangular Training\n if self.rect:\n # Sort by aspect ratio\n s = self.shapes # wh\n ar = s[:, 1] / s[:, 0] # aspect ratio\n irect = ar.argsort()\n self.img_files = [self.img_files[i] for i in irect]\n self.label_files = [self.label_files[i] for i in irect]\n self.labels = [self.labels[i] for i in irect]\n self.shapes = s[irect] # wh\n ar = ar[irect]\n\n # Set training image shapes\n shapes = [[1, 1]] * nb\n for i in range(nb):\n ari = ar[bi == i]\n mini, maxi = ari.min(), ari.max()\n if maxi < 1:\n shapes[i] = [maxi, 1]\n elif mini > 1:\n shapes[i] = [1, 1 / mini]\n\n self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride\n\n # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)\n self.imgs, self.img_npy = [None] * n, [None] * n\n if cache_images:\n if cache_images == 'disk':\n self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')\n self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]\n self.im_cache_dir.mkdir(parents=True, exist_ok=True)\n gb = 0 # Gigabytes of cached images\n self.img_hw0, self.img_hw = [None] * n, [None] * n\n results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))\n pbar = tqdm(enumerate(results), total=n)\n for i, x in pbar:\n if cache_images == 'disk':\n if not self.img_npy[i].exists():\n np.save(self.img_npy[i].as_posix(), x[0])\n gb += self.img_npy[i].stat().st_size\n else:\n self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, 
hw_resized = load_image(self, i)\n gb += self.imgs[i].nbytes\n pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'\n pbar.close()\n\n def cache_labels(self, path=Path('./labels.cache'), prefix=''):\n # Cache dataset labels, check images and read shapes\n x = {} # dict\n nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages\n desc = f\"{prefix}Scanning '{path.parent / path.stem}' images and labels...\"\n with Pool(NUM_THREADS) as pool:\n pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),\n desc=desc, total=len(self.img_files))\n for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:\n nm += nm_f\n nf += nf_f\n ne += ne_f\n nc += nc_f\n if im_file:\n x[im_file] = [l, shape, segments]\n if msg:\n msgs.append(msg)\n pbar.desc = f\"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted\"\n\n pbar.close()\n if msgs:\n logging.info('\\n'.join(msgs))\n if nf == 0:\n logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')\n x['hash'] = get_hash(self.label_files + self.img_files)\n x['results'] = nf, nm, ne, nc, len(self.img_files)\n x['msgs'] = msgs # warnings\n x['version'] = 0.4 # cache version\n try:\n np.save(path, x) # save cache for next time\n path.with_suffix('.cache.npy').rename(path) # remove .npy suffix\n logging.info(f'{prefix}New cache created: {path}')\n except Exception as e:\n logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable\n return x\n\n def __len__(self):\n return len(self.img_files)\n\n # def __iter__(self):\n # self.count = -1\n # print('ran dataset iter')\n # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)\n # return self\n\n def __getitem__(self, index):\n index = self.indices[index] # linear, shuffled, or image_weights\n\n hyp = self.hyp\n mosaic = self.mosaic and random.random() < hyp['mosaic']\n if mosaic:\n # Load mosaic\n img, labels = load_mosaic(self, index)\n shapes = None\n\n # MixUp augmentation\n if random.random() < hyp['mixup']:\n img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))\n\n else:\n # Load image\n img, (h0, w0), (h, w) = load_image(self, index)\n\n # Letterbox\n shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape\n img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)\n shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling\n\n labels = self.labels[index].copy()\n if labels.size: # normalized xywh to pixel xyxy format\n labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])\n\n if self.augment:\n img, labels = random_perspective(img, labels,\n degrees=hyp['degrees'],\n translate=hyp['translate'],\n scale=hyp['scale'],\n shear=hyp['shear'],\n perspective=hyp['perspective'])\n\n nl = len(labels) # number of labels\n if nl:\n labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)\n\n if self.augment:\n # Albumentations\n img, labels = self.albumentations(img, labels)\n\n # HSV color-space\n augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])\n\n # Flip up-down\n if random.random() < hyp['flipud']:\n img = np.flipud(img)\n if nl:\n labels[:, 2] = 1 - labels[:, 2]\n\n # Flip left-right\n if random.random() < hyp['fliplr']:\n img = np.fliplr(img)\n if nl:\n labels[:, 1] = 1 - labels[:, 
1]\n\n # Cutouts\n # labels = cutout(img, labels, p=0.5)\n\n labels_out = torch.zeros((nl, 6))\n if nl:\n labels_out[:, 1:] = torch.from_numpy(labels)\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return torch.from_numpy(img), labels_out, self.img_files[index], shapes\n\n @staticmethod\n def collate_fn(batch):\n img, label, path, shapes = zip(*batch) # transposed\n for i, l in enumerate(label):\n l[:, 0] = i # add target image index for build_targets()\n return torch.stack(img, 0), torch.cat(label, 0), path, shapes\n\n @staticmethod\n def collate_fn4(batch):\n img, label, path, shapes = zip(*batch) # transposed\n n = len(shapes) // 4\n img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]\n\n ho = torch.tensor([[0., 0, 0, 1, 0, 0]])\n wo = torch.tensor([[0., 0, 1, 0, 0, 0]])\n s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale\n for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW\n i *= 4\n if random.random() < 0.5:\n im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[\n 0].type(img[i].type())\n l = label[i]\n else:\n im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)\n l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s\n img4.append(im)\n label4.append(l)\n\n for i, l in enumerate(label4):\n l[:, 0] = i # add target image index for build_targets()\n\n return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4\n\n\n# Ancillary functions --------------------------------------------------------------------------------------------------\ndef load_image(self, i):\n # loads 1 image from dataset index 'i', returns im, original hw, resized hw\n im = self.imgs[i]\n if im is None: # not cached in ram\n npy = self.img_npy[i]\n if npy and npy.exists(): # load npy\n im = np.load(npy)\n else: # read image\n path = self.img_files[i]\n im = cv2.imread(path) # BGR\n assert im is not None, 'Image Not Found ' + path\n h0, w0 = im.shape[:2] # orig hw\n r = self.img_size / max(h0, w0) # ratio\n if r != 1: # if sizes are not equal\n im = cv2.resize(im, (int(w0 * r), int(h0 * r)),\n interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)\n return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized\n else:\n return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized\n\n\ndef load_mosaic(self, index):\n # loads images in a 4-mosaic\n\n labels4, segments4 = [], []\n s = self.img_size\n yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y\n indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices\n for i, index in enumerate(indices):\n # Load image\n img, _, (h, w) = load_image(self, index)\n\n # place img in img4\n if i == 0: # top left\n img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles\n x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)\n x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)\n elif i == 1: # top right\n x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc\n x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h\n elif i == 2: # bottom left\n x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)\n x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)\n elif i == 3: # bottom 
right\n x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)\n x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)\n\n img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]\n padw = x1a - x1b\n padh = y1a - y1b\n\n # Labels\n labels, segments = self.labels[index].copy(), self.segments[index].copy()\n if labels.size:\n labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format\n segments = [xyn2xy(x, w, h, padw, padh) for x in segments]\n labels4.append(labels)\n segments4.extend(segments)\n\n # Concat/clip labels\n labels4 = np.concatenate(labels4, 0)\n for x in (labels4[:, 1:], *segments4):\n np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()\n # img4, labels4 = replicate(img4, labels4) # replicate\n\n # Augment\n img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])\n img4, labels4 = random_perspective(img4, labels4, segments4,\n degrees=self.hyp['degrees'],\n translate=self.hyp['translate'],\n scale=self.hyp['scale'],\n shear=self.hyp['shear'],\n perspective=self.hyp['perspective'],\n border=self.mosaic_border) # border to remove\n\n return img4, labels4\n\n\ndef load_mosaic9(self, index):\n # loads images in a 9-mosaic\n\n labels9, segments9 = [], []\n s = self.img_size\n indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices\n for i, index in enumerate(indices):\n # Load image\n img, _, (h, w) = load_image(self, index)\n\n # place img in img9\n if i == 0: # center\n img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles\n h0, w0 = h, w\n c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates\n elif i == 1: # top\n c = s, s - h, s + w, s\n elif i == 2: # top right\n c = s + wp, s - h, s + wp + w, s\n elif i == 3: # right\n c = s + w0, s, s + w0 + w, s + h\n elif i == 4: # bottom right\n c = s + w0, s + hp, s + w0 + w, s + hp + h\n elif i == 5: # bottom\n c = s + w0 - w, s + h0, s + w0, s + h0 + h\n elif i == 6: # bottom left\n c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h\n elif i == 7: # left\n c = s - w, s + h0 - h, s, s + h0\n elif i == 8: # top left\n c = s - w, s + h0 - hp - h, s, s + h0 - hp\n\n padx, pady = c[:2]\n x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords\n\n # Labels\n labels, segments = self.labels[index].copy(), self.segments[index].copy()\n if labels.size:\n labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format\n segments = [xyn2xy(x, w, h, padx, pady) for x in segments]\n labels9.append(labels)\n segments9.extend(segments)\n\n # Image\n img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]\n hp, wp = h, w # height, width previous\n\n # Offset\n yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y\n img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]\n\n # Concat/clip labels\n labels9 = np.concatenate(labels9, 0)\n labels9[:, [1, 3]] -= xc\n labels9[:, [2, 4]] -= yc\n c = np.array([xc, yc]) # centers\n segments9 = [x - c for x in segments9]\n\n for x in (labels9[:, 1:], *segments9):\n np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()\n # img9, labels9 = replicate(img9, labels9) # replicate\n\n # Augment\n img9, labels9 = random_perspective(img9, labels9, segments9,\n degrees=self.hyp['degrees'],\n translate=self.hyp['translate'],\n scale=self.hyp['scale'],\n shear=self.hyp['shear'],\n perspective=self.hyp['perspective'],\n 
border=self.mosaic_border) # border to remove\n\n return img9, labels9\n\n\ndef create_folder(path='./new'):\n # Create folder\n if os.path.exists(path):\n shutil.rmtree(path) # delete output folder\n os.makedirs(path) # make new output folder\n\n\ndef flatten_recursive(path='../datasets/coco128'):\n # Flatten a recursive directory by bringing all files to top level\n new_path = Path(path + '_flat')\n create_folder(new_path)\n for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):\n shutil.copyfile(file, new_path / Path(file).name)\n\n\ndef extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes()\n # Convert detection dataset into classification dataset, with one directory per class\n path = Path(path) # images dir\n shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing\n files = list(path.rglob('*.*'))\n n = len(files) # number of files\n for im_file in tqdm(files, total=n):\n if im_file.suffix[1:] in IMG_FORMATS:\n # image\n im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB\n h, w = im.shape[:2]\n\n # labels\n lb_file = Path(img2label_paths([str(im_file)])[0])\n if Path(lb_file).exists():\n with open(lb_file, 'r') as f:\n lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels\n\n for j, x in enumerate(lb):\n c = int(x[0]) # class\n f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename\n if not f.parent.is_dir():\n f.parent.mkdir(parents=True)\n\n b = x[1:] * [w, h, w, h] # box\n # b[2:] = b[2:].max() # rectangle to square\n b[2:] = b[2:] * 1.2 + 3 # pad\n b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)\n\n b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image\n b[[1, 3]] = np.clip(b[[1, 3]], 0, h)\n assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'\n\n\ndef autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):\n \"\"\" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files\n Usage: from utils.datasets import *; autosplit()\n Arguments\n path: Path to images directory\n weights: Train, val, test weights (list, tuple)\n annotated_only: Only use images with an annotated txt file\n \"\"\"\n path = Path(path) # images dir\n files = sum([list(path.rglob(f\"*.{img_ext}\")) for img_ext in IMG_FORMATS], []) # image files only\n n = len(files) # number of files\n random.seed(0) # for reproducibility\n indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split\n\n txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files\n [(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing\n\n print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)\n for i, img in tqdm(zip(indices, files), total=n):\n if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label\n with open(path.parent / txt[i], 'a') as f:\n f.write('./' + img.relative_to(path.parent).as_posix() + '\\n') # add image to txt file\n\n\ndef verify_image_label(args):\n # Verify one image-label pair\n im_file, lb_file, prefix = args\n nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt\n try:\n # verify images\n im = Image.open(im_file)\n im.verify() # PIL verify\n shape = exif_size(im) # image size\n assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'\n assert im.format.lower() in 
IMG_FORMATS, f'invalid image format {im.format}'\n if im.format.lower() in ('jpg', 'jpeg'):\n with open(im_file, 'rb') as f:\n f.seek(-2, 2)\n assert f.read() == b'\\xff\\xd9', 'corrupted JPEG'\n\n # verify labels\n segments = [] # instance segments\n if os.path.isfile(lb_file):\n nf = 1 # label found\n with open(lb_file, 'r') as f:\n l = [x.split() for x in f.read().strip().splitlines() if len(x)]\n if any([len(x) > 8 for x in l]): # is segment\n classes = np.array([x[0] for x in l], dtype=np.float32)\n segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)\n l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)\n l = np.array(l, dtype=np.float32)\n if len(l):\n assert l.shape[1] == 5, 'labels require 5 columns each'\n assert (l >= 0).all(), 'negative labels'\n assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'\n assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'\n else:\n ne = 1 # label empty\n l = np.zeros((0, 5), dtype=np.float32)\n else:\n nm = 1 # label missing\n l = np.zeros((0, 5), dtype=np.float32)\n return im_file, l, shape, segments, nm, nf, ne, nc, ''\n except Exception as e:\n nc = 1\n msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}'\n return [None, None, None, None, nm, nf, ne, nc, msg]\n\n\ndef dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):\n \"\"\" Return dataset statistics dictionary with images and instances counts per split per class\n To run in parent directory: export PYTHONPATH=\"$PWD/yolov5\"\n Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)\n Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')\n Arguments\n path: Path to data.yaml or data.zip (with data.yaml inside data.zip)\n autodownload: Attempt to download dataset if not found locally\n verbose: Print stats dictionary\n \"\"\"\n\n def round_labels(labels):\n # Update labels to integer class and 6 decimal place floats\n return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels]\n\n def unzip(path):\n # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'\n if str(path).endswith('.zip'): # path is data.zip\n assert Path(path).is_file(), f'Error unzipping {path}, file not found'\n assert os.system(f'unzip -q {path} -d {path.parent}') == 0, f'Error unzipping {path}'\n dir = path.with_suffix('') # dataset directory\n return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path\n else: # path is data.yaml\n return False, None, path\n\n def hub_ops(f, max_dim=1920):\n # HUB ops for 1 image 'f'\n im = Image.open(f)\n r = max_dim / max(im.height, im.width) # ratio\n if r < 1.0: # image too large\n im = im.resize((int(im.width * r), int(im.height * r)))\n im.save(im_dir / Path(f).name, quality=75) # save\n\n zipped, data_dir, yaml_path = unzip(Path(path))\n with open(check_file(yaml_path), encoding='ascii', errors='ignore') as f:\n data = yaml.safe_load(f) # data dict\n if zipped:\n data['path'] = data_dir # TODO: should this be dir.resolve()?\n check_dataset(data, autodownload) # download dataset if missing\n hub_dir = Path(data['path'] + ('-hub' if hub else ''))\n stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary\n for split in 'train', 'val', 'test':\n if data.get(split) is None:\n stats[split] = None # i.e. 
no test set\n continue\n x = []\n dataset = LoadImagesAndLabels(data[split]) # load dataset\n for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):\n x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))\n x = np.array(x) # shape(128x80)\n stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},\n 'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),\n 'per_class': (x > 0).sum(0).tolist()},\n 'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in\n zip(dataset.img_files, dataset.labels)]}\n\n if hub:\n im_dir = hub_dir / 'images'\n im_dir.mkdir(parents=True, exist_ok=True)\n for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):\n pass\n\n # Profile\n stats_path = hub_dir / 'stats.json'\n if profile:\n for _ in range(1):\n file = stats_path.with_suffix('.npy')\n t1 = time.time()\n np.save(file, stats)\n t2 = time.time()\n x = np.load(file, allow_pickle=True)\n print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')\n\n file = stats_path.with_suffix('.json')\n t1 = time.time()\n with open(file, 'w') as f:\n json.dump(stats, f) # save stats *.json\n t2 = time.time()\n with open(file, 'r') as f:\n x = json.load(f) # load hyps dict\n print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')\n\n # Save, print and return\n if hub:\n print(f'Saving {stats_path.resolve()}...')\n with open(stats_path, 'w') as f:\n json.dump(stats, f) # save stats.json\n if verbose:\n print(json.dumps(stats, indent=2, sort_keys=False))\n return stats\n"
] | [
[
"torch.zeros",
"torch.cat",
"numpy.flipud",
"numpy.concatenate",
"numpy.all",
"torch.utils.data.distributed.DistributedSampler",
"numpy.clip",
"numpy.fliplr",
"numpy.arange",
"numpy.unique",
"torch.from_numpy",
"numpy.stack",
"torch.tensor",
"numpy.save",
"numpy.full",
"numpy.load",
"numpy.zeros",
"numpy.ascontiguousarray",
"torch.stack",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
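Illustrative sketch (Python, not part of any record): the loader code in the record above builds batches with a collate_fn that writes the batch index into column 0 of each label tensor before concatenating. The toy below replays that pattern on dummy data using only torch calls from the record's API column; image shapes and label counts are invented for the example.

import torch

def collate_sketch(batch):
    # batch: list of (img, labels) pairs; labels are (n_i, 6) with column 0 left free
    imgs, labels = zip(*batch)
    for i, l in enumerate(labels):
        l[:, 0] = i  # record which image of the batch each label row belongs to
    return torch.stack(imgs, 0), torch.cat(labels, 0)

if __name__ == "__main__":
    dummy = [(torch.zeros(3, 640, 640), torch.zeros(n, 6)) for n in (2, 5, 1)]
    imgs, labels = collate_sketch(dummy)
    print(imgs.shape, labels.shape)  # torch.Size([3, 3, 640, 640]) torch.Size([8, 6])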
Orange-OpenSource/AIVC | [
"8534111d1e08cdbf7efa92ebbb105af3c9044521"
] | [
"src/real_life/encode.py"
] | [
"# Software Name: AIVC\n# SPDX-FileCopyrightText: Copyright (c) 2021 Orange\n# SPDX-License-Identifier: BSD 3-Clause \"New\"\n#\n# This software is distributed under the BSD-3-Clause license.\n#\n# Authors: Theo Ladune <[email protected]>\n# Pierrick Philippe <[email protected]>\n\nimport os\nimport math\nimport time\nimport glob\n\nfrom numpy.random import randint\nfrom sys import maxsize\n\nfrom model_mngt.model_management import infer_one_sequence\nfrom func_util.nn_util import get_value\nfrom func_util.console_display import print_log_msg\n\n\ndef encode(param):\n DEFAULT_PARAM = {\n # The model to be evaluated. Must be a nn.Module\n 'model': None,\n # Absolute path of the folder containing the PNGs.\n 'sequence_path': '',\n # The GOP structure name, used only for logging, must be a string\n 'GOP_struct_name': '',\n # The GOP structure defined as in func_util/GOP_structure.py\n 'GOP_struct': None,\n # For multi-rate\n 'idx_rate': 0.,\n # Path of the final bitstream file\n 'final_file': '',\n # Set to true to generate more stuff, useful for debug\n 'flag_bitstream_debug': False,\n # First and last frame to encode (included)\n 'idx_starting_frame': 1,\n # If set to -1: encode until the last frame\n 'idx_end_frame': -1,\n }\n\n # =========================== RETRIEVE INPUTS =========================== #\n model = get_value('model', param, DEFAULT_PARAM)\n sequence_path = get_value('sequence_path', param, DEFAULT_PARAM)\n GOP_struct = get_value('GOP_struct', param, DEFAULT_PARAM)\n GOP_struct_name = get_value('GOP_struct_name', param, DEFAULT_PARAM)\n flag_bitstream_debug = get_value('flag_bitstream_debug', param, DEFAULT_PARAM)\n idx_rate = get_value('idx_rate', param, DEFAULT_PARAM)\n final_file = get_value('final_file', param, DEFAULT_PARAM)\n idx_starting_frame = get_value('idx_starting_frame', param, DEFAULT_PARAM)\n idx_end_frame = get_value('idx_end_frame', param, DEFAULT_PARAM)\n\n # =========================== RETRIEVE INPUTS =========================== #\n\n # Random number for the bitstream dir. 
Then count what's inside with glob to increment the number\n # Format of the bitstream dir: ../tmp/RANDOMNUMBER_X/tmp_out_bitstream_dir\n # Where X is the result of len(glob.glob())\n random_bitstream_dir = str(randint(maxsize))\n bitstream_dir = '../tmp/' + str(random_bitstream_dir) + '_' \\\n + str(len(glob.glob('../tmp/' + str(random_bitstream_dir) + '*'))) + '/tmp_bitstream_working_dir/'\n\n if final_file == bitstream_dir.split('/')[-2]:\n print('ERROR: The bitstream file can not be in bitstream_dir')\n print('ERROR: Please change your directory!')\n return\n\n if not(sequence_path.endswith('/')):\n sequence_path += '/'\n\n # Count number of PNGs for the Y channel in sequence_path\n # The goal is to get the max index, no need to sort glob.glob\n png_list = glob.glob(sequence_path + '*_y.png')\n png_idx = [int(x.split('/')[-1].rstrip('_y.png')) for x in png_list]\n max_index = max(png_idx)\n\n if (idx_starting_frame > idx_end_frame) and (idx_end_frame != -1):\n print('ERROR: First frame index bigger than last frame index')\n return\n # PNG numbering starts at 0!\n if idx_end_frame > max_index:\n print('ERROR: Last frame index exceeds the last frame index')\n return\n\n if idx_end_frame == -1:\n idx_end_frame = max_index\n\n # Clean bitstream directory\n os.system('rm -r ' + bitstream_dir)\n os.system('rm ' + bitstream_dir.rstrip('/'))\n os.system('mkdir -p ' + bitstream_dir)\n\n # Mainly use to output some debug values such as estimated rate, real-rate or PSNR\n encoder_out = {}\n working_dir = './logs/'\n print_log_msg('INFO', 'Start encoding', '', '')\n start_time = time.time()\n infer_one_sequence({\n 'model': model,\n 'GOP_struct_name': GOP_struct_name,\n 'GOP_struct': GOP_struct,\n 'sequence_path': sequence_path,\n 'idx_starting_frame': idx_starting_frame,\n 'idx_end_frame': idx_end_frame,\n 'idx_rate': idx_rate,\n 'loading_mode': 'old',\n 'bitstream_dir': bitstream_dir,\n 'generate_bitstream': True,\n 'flag_bitstream_debug': flag_bitstream_debug,\n 'final_bitstream_path': final_file,\n 'working_dir': working_dir,\n })\n # We're done for this sequence!\n elapsed_time = time.time() - start_time\n print_log_msg('INFO', 'Encoding done', '', '')\n print_log_msg('INFO', 'Bitstream path', '', final_file)\n\n # Measure the size of the data.zip file\n encoder_out = {}\n encoder_out['real_rate_byte'] = os.path.getsize(final_file)\n\n # Read log file to display some info\n result_file_name = working_dir + 'detailed.txt'\n f = open(result_file_name, 'r')\n\n # Last line = summary of the encoding\n line = f.readlines()[-1]\n # Parse line\n line = [x.lstrip(' ').rstrip(' ') for x in line.rstrip('\\n').split('|')][1:-1]\n cur_psnr = float(line[2])\n cur_rate_bpp = float(line[3])\n cur_ms_ssim_db = float(line[9])\n cur_h = float(line[10])\n cur_w = float(line[11])\n\n # Number of frames we wanted to code.\n nb_frames_to_code = idx_end_frame - idx_starting_frame + 1\n # How many frames did we code in practice: add the padded frames.\n nb_coded_frames = math.ceil(nb_frames_to_code / len(GOP_struct)) * len(GOP_struct)\n # This is the estimated rate in byte\n cur_rate_byte = cur_rate_bpp * cur_h * cur_w * nb_coded_frames / 8\n\n encoder_out['psnr'] = cur_psnr\n encoder_out['ms_ssim_db'] = cur_ms_ssim_db\n encoder_out['h'] = cur_h\n encoder_out['w'] = cur_w\n encoder_out['nb_coded_frames'] = nb_coded_frames\n encoder_out['nb_frames_to_code'] = nb_frames_to_code\n encoder_out['nb_frames_gop'] = len(GOP_struct)\n encoder_out['estimated_rate_byte'] = cur_rate_byte\n rate_overhead = 
(encoder_out.get('real_rate_byte') / encoder_out.get('estimated_rate_byte') - 1) * 100\n encoder_out['rate_overhead_percent'] = rate_overhead\n\n # Display the encoding results\n print_log_msg('INFO', 'Frame resolution', '[H x W]', str(int(encoder_out.get('h'))) + ' x ' + str(int(encoder_out.get('w'))))\n print_log_msg('INFO', 'First coded frame', '[frame]', int(idx_starting_frame))\n print_log_msg('INFO', 'Last coded frame', '[frame]', int(idx_end_frame))\n print_log_msg('INFO', 'Number of frames to code', '[frame]', int(encoder_out.get('nb_frames_to_code')))\n print_log_msg('INFO', 'Number of frames coded', '[frame]', int(encoder_out.get('nb_coded_frames')))\n print_log_msg('INFO', 'Intra-period', '[frame]', int(encoder_out.get('nb_frames_gop')))\n print_log_msg('RESULT', 'Number of frames', '[frame]', int(idx_end_frame - idx_starting_frame + 1))\n print_log_msg('RESULT', 'Encoding/decoding time', '[s]', '%.1f' % (elapsed_time))\n print_log_msg('RESULT', 'Encoding/decoding FPS', '[frame/s]', '%.1f' % ((idx_end_frame - idx_starting_frame + 1) / elapsed_time))\n print_log_msg('RESULT', 'Estimated PSNR', '[dB]', '%.4f' % (encoder_out.get('psnr')))\n print_log_msg('RESULT', 'Estimated MS-SSIM', '[dB]', '%.4f' % (encoder_out.get('ms_ssim_db')))\n print_log_msg('RESULT', 'Estimated rate', '[byte]', '%.1f' % (encoder_out.get('estimated_rate_byte')))\n print_log_msg('RESULT', 'Real rate', '[byte]', int(encoder_out.get('real_rate_byte')))\n print_log_msg('RESULT', 'Estimated rate overhead', '[%]', '%.2f' % (encoder_out.get('rate_overhead_percent')))\n\n # Clean the internal bitstream working dir\n os.system('rm -r ' + bitstream_dir)\n os.system('rmdir ' + '/'.join(bitstream_dir.rstrip('/').split('/')[:-1]))\n\n return encoder_out\n"
] | [
[
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
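Illustrative sketch (Python, not part of any record): the encoder above derives a throw-away bitstream directory name from numpy.random.randint plus a glob count, i.e. ../tmp/<random>_<count>/tmp_bitstream_working_dir/. The snippet below isolates just that naming step; the '../tmp/' root comes from the record, and randint(sys.maxsize) assumes a platform whose default numpy integer is 64-bit, exactly as the record's code does.

import glob
from sys import maxsize
from numpy.random import randint

def pick_bitstream_dir(root='../tmp/'):
    # Same recipe as the record: random prefix, then a counter from how many
    # entries with that prefix already exist, to avoid collisions across runs.
    rnd = str(randint(maxsize))
    count = len(glob.glob(root + rnd + '*'))
    return '{}{}_{}/tmp_bitstream_working_dir/'.format(root, rnd, count)

print(pick_bitstream_dir())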
eusojk/soil_apis | [
"630ddd49802c351b44df28225707a53adad782b5"
] | [
"make_static_soil_db.py"
] | [
"import os\nimport glob\nimport argparse\nimport sys\nimport pandas as pd\nimport soilapis.extract_country_bbox as ecb\nfrom pathlib import Path\nfrom soilapis.calculator import SoilConnector\nfrom shutil import copyfile, copyfileobj\n\ndirname = ''\n\ndef make_static_soil_db(soil_dir, country='Thailand'):\n \"\"\"\n Main function to create the static SOL\n :param soil_dir: main directory containing the soil properties (bulk density, organic carbon, clay, sand\n :param country: the name of the country of interest. This is set to Thailand by default\n :return: pathname to the final .SOL\n \"\"\"\n\n # what is the iso code of this country?\n country_iso = ecb.get_country_iso(country)\n\n # check if country is valid\n if country_iso is None:\n return\n\n # check if this country is suitable for the script:\n if is_loc_file_present(country_iso) is None:\n print(\"{} is not currently supported :(\".format(country))\n return\n # else:\n # print(\"Yay!\")\n # return\n\n lon_lat_fn = is_loc_file_present(country_iso)[0]\n lon_lat_df = pd.read_csv(lon_lat_fn)\n lon_df = lon_lat_df['lon']\n lat_df = lon_lat_df['lat']\n\n # set output directory\n output_dir = is_loc_file_present(country_iso)[1]\n # print(\"output_dir\", output_dir)\n os.chdir(output_dir)\n\n # create an instance of soil connector\n soil_conn = SoilConnector(soil_dir)\n depth_arg = 600\n win_size = 20\n format_arg = \"dssat\"\n error_codes = [-89, -99]\n # how many rows do we need to loop through\n num_rows = lon_lat_df.shape[0]\n\n # naming convention: TH_000000*\n name_conv = country_iso + '_' + (len(str(num_rows)) + 1) * '0'\n len_name_conv = len(name_conv)\n\n # Manufacture each dynamic .SOL for each point\n for row_i in range(num_rows):\n # get them lon, lat\n lon = lon_df.iloc[row_i]\n lat = lat_df.iloc[row_i]\n\n # create the dynamic .SOL for this point\n soil_dssat = soil_conn.get_soil_property(lon, lat, depth_arg, win_size, format_arg)\n\n # Watch out for sea values:\n if soil_dssat in error_codes:\n continue\n\n # fix the code in TH.SOL:\n digt = str(row_i + 1)\n len_i = len(digt)\n cut_at = len_name_conv - len_i\n cut_val = name_conv[:cut_at]\n new_code = cut_val + digt\n fix_code_num_in_sol(new_code, soil_dssat)\n\n new_name_i = str(output_dir) + '/' + new_code + '.SOLD'\n\n copyfile(soil_dssat, new_name_i)\n print('Writing: ', new_name_i)\n # if row_i == 2:\n # break\n\n # Main static file\n dot_sol_output = str(output_dir) + '/' + country_iso + '.SOL'\n dot_sol_path = merge_all_dot_sol(output_dir, dot_sol_output, num_rows)\n\n # remove dynamic .SOL\n remove_dynamic_dot_sol(output_dir)\n\n return dot_sol_path\n\n\ndef fix_code_num_in_sol(new_code, sol_file):\n \"\"\"\n The dynamic .SOL has a hardcoded codename (e.g. TH_00001). We need to change that\n :param new_code: new code to substitute\n :param sol_file: the file to correct\n :return:\n \"\"\"\n from_file = open(sol_file)\n hline = from_file.readline()\n\n hline_new = \"*\" + new_code + hline[12:]\n\n to_file = open(sol_file, mode=\"w\")\n to_file.write(hline_new)\n copyfileobj(from_file, to_file)\n # print(hline)\n # print(hline_new)\n\n\ndef merge_all_dot_sol(outputs_dir, dot_sol_output, num_rows):\n \"\"\"\n Merge all .SOL into one\n :param outputs_dir: directory containing the dynamic .SOL\n :param dot_sol_output: file to write the content to. This is the static .SOL\n :param num_rows: number of points. 
This is to make sure that all dynamic SOL are written\n :return: pathname to the final .SOL file\n \"\"\"\n # get all the dynamic .SOL\n match = str(outputs_dir) + '/*.SOLD'\n\n all_dot_sols = glob.glob(match)\n all_dot_sols.sort()\n\n # for i in all_dot_sols:\n # print(i)\n\n # if len(all_dot_sols) != num_cells: # something wrong\n # print(\"Stopping: The static database seems not complete\")\n # return\n\n with open(dot_sol_output, \"wb\") as outfile:\n for f in all_dot_sols:\n with open(f, \"rb\") as infile:\n outfile.write(infile.read())\n outfile.write('\\n'.encode())\n dot_sol_output = str(outputs_dir) + '/' + dot_sol_output\n return Path(dot_sol_output)\n\n\ndef is_loc_file_present(country_iso):\n \"\"\"\n checks if the file containing the lat and lon values of a country is present\n :param country_iso: a 3 string value: representing the iso code of a country. e.g; tha for Thailand\n :return: pathname a tuple if file exists, None otherwise\n \"\"\"\n global dirname\n\n country_iso = country_iso.lower()\n script_dir = dirname #os.getcwd()\n # script_dir = os.path.abspath(os.path.dirname(script_dir))\n\n output_dir = script_dir + '/outputs/'\n output_dir = Path(output_dir)\n\n locs_dir = script_dir + '/locs/'\n loc_file = locs_dir + country_iso + '_lon_lat_centers.csv'\n\n # print('script_dir:',script_dir)\n # print('loc_file:',loc_file)\n # print('dirname:',dirname)\n\n # loc_file = Path(loc_file)\n if os.path.exists(loc_file):\n return loc_file, output_dir\n else:\n return\n\n\ndef remove_dynamic_dot_sol(dot_sol_dir):\n \"\"\"\n Remove dynamic .SOL's\n :param dot_sol_dir: directory containing the dynamic .SOL\n :return: None\n \"\"\"\n match = str(dot_sol_dir) + '/*.SOLD'\n\n all_dot_sols = glob.glob(match)\n for fl in all_dot_sols:\n # print(os.path.isfile(fl), fl)\n if os.path.isfile(fl):\n os.remove(fl)\n else:\n print(\"Error - deleting:\", fl)\n\n\ndef is_soil_layers_present(script_path):\n global dirname\n\n script_dir = os.path.abspath(os.path.dirname(script_path))\n dirname = script_dir\n layers_dir = script_dir + '/soilproperties/'\n\n if not Path(layers_dir).exists():\n return\n # print(\"layers_dir\", Path(layers_dir).exists(), layers_dir)\n return layers_dir\n\n\ndef main():\n path_name = '/home/eusojk/Downloads/layers/soilproperties/'\n # print(make_static_soil_db(path_name))\n\n arg1 = \"/home/eusojk/PycharmProjects/soil_apis/outputs\"\n # arg2 = 'TTT.SOL'\n # merge_all_dot_sol(arg1, arg2)\n\n # remove_dynamic_dot_sol(arg1)\n script_path = sys.argv[0]\n soil_dir = is_soil_layers_present(script_path)\n\n if soil_dir is None:\n print(\n \"\\n The 'soilproperties' directory is missing. Please download the zip file and place it in your project directory.\")\n return\n\n parser = argparse.ArgumentParser(\n description=\"This script creates a static soil database .SOL\"\n )\n parser.add_argument(\"--country\", type=str, required=True, help=\"country, e.g. Thailand\")\n country_arg = vars(parser.parse_args())['country']\n # print(country_arg, soil_dir)\n\n make_static_soil_db(soil_dir, country_arg)\n\n\nif __name__ == '__main__':\n # main()\n try:\n main()\n except KeyboardInterrupt:\n print()\n print('Script Interrupted. No static SOL produced :( ')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
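Illustrative sketch (Python, not part of any record): make_static_soil_db above reads a <iso>_lon_lat_centers.csv with pandas.read_csv (the record's only listed API) and walks its lon/lat columns row by row. The sketch below runs the same ingestion loop on an in-memory CSV so it needs no files on disk; the column names lon/lat are taken from the record, the coordinate values are made up.

import io
import pandas as pd

csv_text = "lon,lat\n100.5,13.7\n101.1,14.2\n"  # stand-in for locs/<iso>_lon_lat_centers.csv
lon_lat_df = pd.read_csv(io.StringIO(csv_text))
for row_i in range(lon_lat_df.shape[0]):
    lon = lon_lat_df['lon'].iloc[row_i]
    lat = lon_lat_df['lat'].iloc[row_i]
    print('point {}: lon={}, lat={}'.format(row_i, lon, lat))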
hyruuk/NeuroPy-MLToolbox | [
"47aa6340cdf510ff2ecc9415a1863902040a0896"
] | [
"mlneurotools/utils.py"
] | [
"\"\"\"collection of useful tools that help with setting up a pipeline\n\nAuthor: Arthur Dehgan\"\"\"\nimport time\nimport functools\nimport numpy as np\nfrom scipy.io import loadmat\nfrom .stats import rm_outliers\n\n\ndef compute_relatives(cond1, cond2, **kwargs):\n \"\"\"Computes the relative changes.\n\n Parameters\n ----------\n cond1, cond2 : array\n Arrays of shape (n_subject x n_eletrodes) or (n_trials x n_electrodes). The arrays of data\n for the conditions.\n\n Returns\n -------\n values : list\n The calculated relative changes\n\n \"\"\"\n cond1 = np.asarray(cond1).mean(axis=0)\n cond2 = np.asarray(cond2).mean(axis=0)\n values = (cond1 - cond2) / cond2\n return values\n\n\ndef proper_loadmat(file_path):\n \"\"\"Loads using scipy.io.loadmat, and cleans some of the metadata\"\"\"\n data = loadmat(file_path)\n clean_data = {}\n for key, value in data.items():\n if not key.startswith(\"__\"):\n clean_data[key] = value.squeeze().tolist()\n return clean_data\n\n\ndef timer(func):\n \"\"\"Decorator to compute time spend for the wrapped function\"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n start_time = time.perf_counter()\n val = func(*args, **kwargs)\n time_diff = elapsed_time(start_time, time.perf_counter())\n print('\"{}\" executed in {}'.format(func.__name__, time_diff))\n return val\n\n return wrapper\n\n\ndef create_groups(y):\n \"\"\"Generate groups from labels of shape (subject x labels).\"\"\"\n k = 0\n y = np.asarray(list(map(np.ravel, y)))\n y = np.asarray(list(map(np.asarray, y)))\n groups = []\n for sub in y:\n for _ in range(len(sub.ravel())):\n groups.append(k)\n k += 1\n groups = np.asarray(groups).ravel()\n y = np.concatenate([lab.ravel() for lab in y], axis=0).ravel()\n return y, groups\n\n\ndef prepare_data(\n data, labels=None, n_trials=None, rm_outl=None, random_state=0, zscore=False\n):\n \"\"\"prepares the data to be used in your ml pipeline.\n\n The function can return the given data after removing outliers, randomly selecting trials\n to balance subjects, and zscoring. It can also generate a labels and groups list.\n\n Parameters\n ----------\n data : list of arrays\n The data to prepare. Each entry of the list must be an array\n of shape (n_trials, n_elecs, n_samples)\n\n labels : list, optional\n The labels of the groups, the list must be of the same length as the data list, and\n indicate the label of each array. You need to set labels if n_trials is not set.\n\n n_trials : int, optional\n The number of trials to pick at random for each array in the data list. You need to set\n n_trials if labels is not set.\n\n rm_outl : int, optional\n The number of standard deviation away from the mean you want to keep. For example if\n rm_outl=3, then all the subjects that have a mean that is strictly superior or inferior to\n 3 times the std + the mean of all subjectswill be deleted. If rm_outl is None, no outlier\n removal will be done.\n\n random_state : int, optional\n The random_state for the random selection of trials. Not used if n_trials is None. 
You\n need to change random_state if you want to bootstrap and repeat the random selection\n multiple times or it will select the same subsets of trials.\n\n zscore : bool, optional, default=False\n Will zscore the data for each group if set to True.\n\n \"\"\"\n final_data = None\n if rm_outl is not None:\n data = np.asarray([rm_outliers(sub, rm_outl) for sub in data])\n\n sizes = [len(sub) for sub in data]\n if n_trials is not None:\n n_sub_min = min(sizes)\n if n_trials > n_sub_min:\n print(\n \"can't take {} trials, taking the minimum amout {} instead\".format(\n n_trials, n_sub_min\n )\n )\n n_trials = n_sub_min\n\n labels = np.asarray([[lab] * n_trials for lab in labels])\n elif labels is not None:\n labels = np.asarray([[labels[i]] * size for i, size in enumerate(sizes)])\n else:\n raise Exception(\n \"Error: either specify a number of trials and the \"\n + \"labels will be generated or give the original labels\"\n )\n labels, groups = create_groups(labels)\n\n for submat in data:\n if submat.shape[0] == 1:\n submat = submat.ravel()\n if n_trials is not None:\n index = np.random.RandomState(random_state).choice(\n range(len(submat)), n_trials, replace=False\n )\n prep_submat = submat[index]\n else:\n prep_submat = submat\n\n if zscore:\n prep_submat = zscore(prep_submat)\n\n final_data = (\n prep_submat\n if final_data is None\n else np.concatenate((prep_submat, final_data))\n )\n return np.asarray(final_data), labels, groups\n\n\ndef elapsed_time(t0, t1, formating=True):\n \"\"\"Time lapsed between t0 and t1.\n\n Returns the time (from time.time()) between t0 and t1 in a\n more readable fashion.\n\n Parameters\n ----------\n t0: float\n time.time() initial measure of time\n (eg. at the begining of the script)\n t1: float\n time.time() time at the end of the script\n or the execution of a function.\n\n \"\"\"\n lapsed = abs(t1 - t0)\n if formating:\n m, h, j = 60, 3600, 24 * 3600\n nbj = lapsed // j\n nbh = (lapsed - j * nbj) // h\n nbm = (lapsed - j * nbj - h * nbh) // m\n nbs = lapsed - j * nbj - h * nbh - m * nbm\n if lapsed > j:\n formated_time = \"{:.0f}j, {:.0f}h:{:.0f}m:{:.0f}s\".format(\n nbj, nbh, nbm, nbs\n )\n elif lapsed > h:\n formated_time = \"{:.0f}h:{:.0f}m:{:.0f}s\".format(nbh, nbm, nbs)\n elif lapsed > m:\n formated_time = \"{:.0f}m:{:.0f}s\".format(nbm, nbs)\n else:\n formated_time = \"{:.4f}s\".format(nbs)\n return formated_time\n return lapsed\n"
] | [
[
"numpy.asarray",
"numpy.random.RandomState",
"scipy.io.loadmat",
"numpy.concatenate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
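Illustrative sketch (Python, not part of any record): prepare_data above balances subjects by drawing n_trials rows per subject with a seeded numpy.random.RandomState and then concatenating the selections. The toy below replays that subsampling on two fake subject arrays; the array sizes are invented, while the calls (RandomState, choice, concatenate) are the ones listed in the record.

import numpy as np

n_trials, seed = 4, 0
subjects = [np.arange(18).reshape(6, 3), np.arange(24).reshape(8, 3)]  # fake (trials x electrodes) data
picked = []
for sub in subjects:
    # seeded RandomState so repeated runs pick the same trials, as in prepare_data
    idx = np.random.RandomState(seed).choice(range(len(sub)), n_trials, replace=False)
    picked.append(sub[idx])
balanced = np.concatenate(picked)
print(balanced.shape)  # (8, 3): n_trials rows kept per subject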
navigator8972/pymanopt | [
"b9f53fa2d187c22ae75f65c71aeeb2bfa8b9c37f"
] | [
"tests/manifolds/test_positive_matrices.py"
] | [
"import autograd.numpy as np\nfrom numpy import testing as np_testing\n\nfrom pymanopt.manifolds import Positive\n\nfrom ._manifold_tests import ManifoldTestCase\n\n\nclass TestPositiveVectors(ManifoldTestCase):\n def setUp(self):\n self.m = m = 3\n self.n = n = 1\n self.k = k = 2\n self.manifold = Positive(m, n, k=k)\n\n super().setUp()\n\n def test_inner_product(self):\n x = self.manifold.random_point()\n g = self.manifold.random_tangent_vector(x)\n h = self.manifold.random_tangent_vector(x)\n assert self.manifold.inner_product(\n x, g, h\n ) == self.manifold.inner_product(x, h, g)\n\n def test_norm(self):\n x = self.manifold.random_point()\n u = self.manifold.random_tangent_vector(x)\n assert self.manifold.norm(x, u) > 0\n\n def test_random_point(self):\n # Just make sure that things generated are on the manifold\n # and that if you generate two they are not equal.\n x = self.manifold.random_point()\n assert (x > 0).all()\n y = self.manifold.random_point()\n assert (self.manifold.dist(x, y)).all() > 1e-6\n\n def test_random_tangent_vector(self):\n # Just make sure that if you generate two they are not equal.\n # check also if unit norm\n x = self.manifold.random_point()\n g = self.manifold.random_tangent_vector(x)\n h = self.manifold.random_tangent_vector(x)\n assert (np.linalg.norm(g - h, axis=(1, 2)) > 1e-6).all()\n\n def test_dist(self):\n # To implement norm of log(x, y)\n x = self.manifold.random_point()\n y = self.manifold.random_point()\n u = self.manifold.log(x, y)\n np_testing.assert_almost_equal(\n self.manifold.norm(x, u), self.manifold.dist(x, y)\n )\n\n def test_exp_log_inverse(self):\n x = self.manifold.random_point()\n y = self.manifold.random_point()\n u = self.manifold.log(x, y)\n z = self.manifold.exp(x, u)\n np_testing.assert_almost_equal(self.manifold.dist(y, z), 0)\n\n def test_log_exp_inverse(self):\n x = self.manifold.random_point()\n u = self.manifold.random_tangent_vector(x)\n y = self.manifold.exp(x, u)\n v = self.manifold.log(x, y)\n np_testing.assert_almost_equal(self.manifold.norm(x, u - v), 0)\n\n def test_retraction(self):\n # Test that the result is on the manifold and that for small\n # tangent vectors it has little effect.\n x = self.manifold.random_point()\n u = self.manifold.random_tangent_vector(x)\n\n xretru = self.manifold.retraction(x, u)\n\n assert (xretru > 0).all()\n\n u = u * 1e-6\n xretru = self.manifold.retraction(x, u)\n np_testing.assert_allclose(xretru, x + u)\n\n def test_euclidean_to_riemannian_gradient_from_cost(self):\n self.run_gradient_test()\n"
] | [
[
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
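Illustrative sketch (Python, not part of any record): the tests above check that exp and log of the Positive manifold invert each other, asserting with numpy.testing.assert_allclose. Rather than call pymanopt's actual implementation, the snippet below replays the same round-trip idea on positive scalars using the usual affine-invariant maps exp_x(u) = x*exp(u/x) and log_x(y) = x*log(y/x); treat those formulas as an assumption of this sketch, not as pymanopt's definition.

import numpy as np
from numpy import testing as np_testing

def exp_map(x, u):
    return x * np.exp(u / x)

def log_map(x, y):
    return x * np.log(y / x)

x, y, u = 0.7, 2.3, 0.01
np_testing.assert_allclose(exp_map(x, log_map(x, y)), y)  # exp after log recovers y
np_testing.assert_allclose(log_map(x, exp_map(x, u)), u)  # log after exp recovers u
print('round-trip checks passed')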
CowherdChris/droidlet | [
"8d965c1ebc38eceb6f8083c52b1146c1bc17d5e1"
] | [
"droidlet/lowlevel/locobot/locobot_mover.py"
] | [
"\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\nimport os\nimport sys\nimport math\nimport copy\nimport time\nimport logging\nfrom collections.abc import Iterable\nfrom prettytable import PrettyTable\nimport Pyro4\nimport numpy as np\n\nif \"/opt/ros/kinetic/lib/python2.7/dist-packages\" in sys.path:\n sys.path.remove(\"/opt/ros/kinetic/lib/python2.7/dist-packages\")\n\nfrom droidlet.shared_data_structs import ErrorWithResponse\nfrom agents.argument_parser import ArgumentParser\nfrom droidlet.shared_data_structs import RGBDepth\n\nfrom .locobot_mover_utils import (\n get_camera_angles,\n angle_diff,\n MAX_PAN_RAD,\n CAMERA_HEIGHT,\n ARM_HEIGHT,\n transform_pose,\n base_canonical_coords_to_pyrobot_coords,\n xyz_pyrobot_to_canonical_coords,\n)\nfrom tenacity import retry, stop_after_attempt, wait_fixed\n\nPyro4.config.SERIALIZER = \"pickle\"\nPyro4.config.SERIALIZERS_ACCEPTED.add(\"pickle\")\n\n\n@retry(reraise=True, stop=stop_after_attempt(5), wait=wait_fixed(0.5))\ndef safe_call(f, *args):\n try:\n return f(*args)\n except Pyro4.errors.ConnectionClosedError as e:\n msg = \"{} - {}\".format(f._RemoteMethod__name, e)\n raise ErrorWithResponse(msg)\n\n\nclass LoCoBotMover:\n \"\"\"Implements methods that call the physical interfaces of the Locobot.\n\n Arguments:\n ip (string): IP of the Locobot.\n backend (string): backend where the Locobot lives, either \"habitat\" or \"locobot\"\n \"\"\"\n\n def __init__(self, ip=None, backend=\"locobot\"):\n self.bot = Pyro4.Proxy(\"PYRONAME:remotelocobot@\" + ip)\n self.close_loop = False if backend == \"habitat\" else True\n self.curr_look_dir = np.array([0, 0, 1]) # initial look dir is along the z-axis\n\n intrinsic_mat = safe_call(self.bot.get_intrinsics)\n intrinsic_mat_inv = np.linalg.inv(intrinsic_mat)\n img_resolution = safe_call(self.bot.get_img_resolution)\n img_pixs = np.mgrid[0 : img_resolution[0] : 1, 0 : img_resolution[1] : 1]\n img_pixs = img_pixs.reshape(2, -1)\n img_pixs[[0, 1], :] = img_pixs[[1, 0], :]\n uv_one = np.concatenate((img_pixs, np.ones((1, img_pixs.shape[1]))))\n self.uv_one_in_cam = np.dot(intrinsic_mat_inv, uv_one)\n self.backend = backend\n\n def check(self):\n \"\"\"\n Sanity checks all the mover interfaces.\n\n Checks move by moving the locobot around in a square and reporting L1 drift and total time taken\n for the three movement modes available to the locobot - using PyRobot slam (vslam),\n using Droidlet slam (dslam) and without using any slam (default)\n Checks look and point by poiting and looking at the same target.\n \"\"\"\n self.reset_camera()\n table = PrettyTable([\"Command\", \"L1 Drift (meters)\", \"Time (sec)\"])\n sq_table = PrettyTable([\"Mode\", \"Total L1 drift (meters)\", \"Total time (sec)\"])\n\n def l1_drift(a, b):\n return round(abs(a[0] - b[0]) + abs(a[1] - b[1]), ndigits=3)\n\n def execute_move(init_pos, dest_pos, cmd_text, use_map=False, use_dslam=False):\n logging.info(\"Executing {} ... \".format(cmd_text))\n start = time.time()\n self.move_absolute([dest_pos], use_map=use_map, use_dslam=use_dslam)\n end = time.time()\n tt = round((end - start), ndigits=3)\n pos_after = self.get_base_pos_in_canonical_coords()\n drift = l1_drift(pos_after, dest_pos)\n logging.info(\"Finished Executing. 
\\nDrift: {} Time taken: {}\".format(drift, tt))\n table.add_row([cmd_text, drift, tt])\n return drift, tt\n\n def move_in_a_square(magic_text, side=0.3, use_vslam=False, use_dslam=False):\n \"\"\"\n Moves the locobot in a square starting from the bottom right - goes left, forward, right, back.\n\n Args:\n magic_text (str): unique text to differentiate each scenario\n side (float): side of the square\n\n Returns:\n total L1 drift, total time taken to move around the square.\n \"\"\"\n pos = self.get_base_pos_in_canonical_coords()\n logging.info(\"Initial agent pos {}\".format(pos))\n dl, tl = execute_move(\n pos,\n [pos[0] - side, pos[1], pos[2]],\n \"Move Left \" + magic_text,\n use_map=use_vslam,\n use_dslam=use_dslam,\n )\n df, tf = execute_move(\n pos,\n [pos[0] - side, pos[1] + side, pos[2]],\n \"Move Forward \" + magic_text,\n use_map=use_vslam,\n use_dslam=use_dslam,\n )\n dr, tr = execute_move(\n pos,\n [pos[0], pos[1] + side, pos[2]],\n \"Move Right \" + magic_text,\n use_map=use_vslam,\n use_dslam=use_dslam,\n )\n db, tb = execute_move(\n pos,\n [pos[0], pos[1], pos[2]],\n \"Move Backward \" + magic_text,\n use_map=use_vslam,\n use_dslam=use_dslam,\n )\n return dl + df + dr + db, tl + tf + tr + tb\n\n # move in a square of side 0.3 starting at current base pos\n d, t = move_in_a_square(\"default\", side=0.3, use_vslam=False, use_dslam=False)\n sq_table.add_row([\"default\", d, t])\n\n d, t = move_in_a_square(\"use_vslam\", side=0.3, use_vslam=True, use_dslam=False)\n sq_table.add_row([\"use_vslam\", d, t])\n\n d, t = move_in_a_square(\"use_dslam\", side=0.3, use_vslam=False, use_dslam=True)\n sq_table.add_row([\"use_dslam\", d, t])\n\n print(table)\n print(sq_table)\n\n # Check that look & point are at the same target\n logging.info(\"Visually check that look and point are at the same target\")\n pos = self.get_base_pos_in_canonical_coords()\n look_pt_target = [pos[0] + 0.5, 1, pos[1] + 1]\n\n # look\n self.look_at(look_pt_target, 0, 0)\n logging.info(\"Completed Look at.\")\n\n # point\n self.point_at(look_pt_target)\n logging.info(\"Completed Point.\")\n\n # TODO/FIXME! instead of just True/False, return diagnostic messages\n # so e.g. 
if a grip attempt fails, the task is finished, but the status is a failure\n def bot_step(self):\n try:\n f = self.bot.command_finished()\n except:\n # do better here?\n f = True\n return f\n\n def get_pan(self):\n \"\"\"get yaw in radians.\"\"\"\n return self.bot.get_pan()\n\n def get_tilt(self):\n \"\"\"get pitch in radians.\"\"\"\n return self.bot.get_tilt()\n\n def reset_camera(self):\n \"\"\"reset the camera to 0 pan and tilt.\"\"\"\n return self.bot.reset()\n\n def move_relative(self, xyt_positions, use_dslam=True):\n \"\"\"Command to execute a relative move.\n\n Args:\n xyt_positions: a list of relative (x,y,yaw) positions for the bot to execute.\n x,y,yaw are in the pyrobot's coordinates.\n \"\"\"\n if not isinstance(next(iter(xyt_positions)), Iterable):\n # single xyt position given\n xyt_positions = [xyt_positions]\n for xyt in xyt_positions:\n self.bot.go_to_relative(xyt, close_loop=self.close_loop, use_dslam=use_dslam)\n while not self.bot.command_finished():\n print(self.bot.get_base_state(\"odom\"))\n\n def move_absolute(self, xyt_positions, use_map=False, use_dslam=True):\n \"\"\"Command to execute a move to an absolute position.\n\n It receives positions in canonical world coordinates and converts them to pyrobot's coordinates\n before calling the bot APIs.\n\n Args:\n xyt_positions: a list of (x_c,y_c,yaw) positions for the bot to move to.\n (x_c,y_c,yaw) are in the canonical world coordinates.\n \"\"\"\n if not isinstance(next(iter(xyt_positions)), Iterable):\n # single xyt position given\n xyt_positions = [xyt_positions]\n for xyt in xyt_positions:\n logging.info(\"Move absolute in canonical coordinates {}\".format(xyt))\n self.bot.go_to_absolute(\n base_canonical_coords_to_pyrobot_coords(xyt),\n close_loop=self.close_loop,\n use_map=use_map,\n use_dslam=use_dslam,\n )\n start_base_state = self.get_base_pos_in_canonical_coords()\n while not self.bot.command_finished():\n print(self.get_base_pos_in_canonical_coords())\n\n end_base_state = self.get_base_pos_in_canonical_coords()\n logging.info(\n \"start {}, end {}, diff {}\".format(\n start_base_state,\n end_base_state,\n [b - a for a, b in zip(start_base_state, end_base_state)],\n )\n )\n return \"finished\"\n\n def look_at(self, obj_pos, yaw_deg, pitch_deg):\n \"\"\"Executes \"look at\" by setting the pan, tilt of the camera or turning the base if required.\n\n Uses both the base state and object coordinates in canonical world coordinates to calculate\n expected yaw and pitch.\n\n Args:\n obj_pos (list): object coordinates as saved in memory.\n yaw_deg (float): yaw in degrees\n pitch_deg (float): pitch in degrees\n\n Returns:\n string \"finished\"\n \"\"\"\n pan_rad, tilt_rad = 0.0, 0.0\n old_pan = self.get_pan()\n old_tilt = self.get_tilt()\n pos = self.get_base_pos_in_canonical_coords()\n logging.info(f\"Current Locobot state (x, z, yaw): {pos}\")\n if yaw_deg:\n pan_rad = old_pan - float(yaw_deg) * np.pi / 180\n if pitch_deg:\n tilt_rad = old_tilt - float(pitch_deg) * np.pi / 180\n if obj_pos is not None:\n logging.info(f\"looking at x,y,z: {obj_pos}\")\n pan_rad, tilt_rad = get_camera_angles([pos[0], CAMERA_HEIGHT, pos[1]], obj_pos)\n logging.info(f\"Returned new pan and tilt angles (radians): ({pan_rad}, {tilt_rad})\")\n\n # FIXME!!! 
more async; move head and body at the same time\n head_res = angle_diff(pos[2], pan_rad)\n if np.abs(head_res) > MAX_PAN_RAD:\n dyaw = np.sign(head_res) * (np.abs(head_res) - MAX_PAN_RAD)\n self.turn(dyaw)\n pan_rad = np.sign(head_res) * MAX_PAN_RAD\n else:\n pan_rad = head_res\n logging.info(f\"Camera new pan and tilt angles (radians): ({pan_rad}, {tilt_rad})\")\n self.bot.set_pan_tilt(pan_rad, np.clip(tilt_rad, tilt_rad, 0.9))\n logging.debug(f\"locobot pan and tilt now: ({self.bot.get_camera_state()})\")\n\n return \"finished\"\n\n def point_at(self, target_pos):\n \"\"\"Executes pointing the arm at the specified target pos.\n\n Args:\n target_pos ([x,y,z]): canonical coordinates to point to.\n\n Returns:\n string \"finished\"\n \"\"\"\n pos = self.get_base_pos_in_canonical_coords()\n yaw_rad, pitch_rad = get_camera_angles([pos[0], ARM_HEIGHT, pos[1]], target_pos)\n states = [\n [yaw_rad, 0.0, pitch_rad, 0.0, 0.0],\n [yaw_rad, 0.0, pitch_rad, -0.2, 0.0],\n [0.0, -math.pi / 4.0, math.pi / 2.0, 0.0, 0.0], # reset joint position\n ]\n for state in states:\n self.bot.set_joint_positions(state, plan=False)\n while not self.bot.command_finished():\n pass\n return \"finished\"\n\n def get_base_pos_in_canonical_coords(self):\n \"\"\"get the current Locobot position in the canonical coordinate system\n instead of the Locobot's global coordinates as stated in the Locobot\n documentation: https://www.pyrobot.org/docs/navigation.\n\n the standard coordinate systems:\n Camera looks at (0, 0, 1),\n its right direction is (1, 0, 0) and\n its up-direction is (0, 1, 0)\n\n return:\n (x, z, yaw) of the Locobot base in standard coordinates\n \"\"\"\n\n x_global, y_global, yaw = safe_call(self.bot.get_base_state, \"odom\")\n x_standard = -y_global\n z_standard = x_global\n return np.array([x_standard, z_standard, yaw])\n\n def get_base_pos(self):\n \"\"\"Return Locobot (x, y, yaw) in the robot base coordinates as\n illustrated in the docs:\n\n https://www.pyrobot.org/docs/navigation\n \"\"\"\n return self.bot.get_base_state(\"odom\")\n\n def get_rgb_depth(self):\n \"\"\"Fetches rgb, depth and pointcloud in pyrobot world coordinates.\n\n Returns:\n an RGBDepth object\n \"\"\"\n rgb, depth, rot, trans = self.bot.get_pcd_data()\n depth = depth.astype(np.float32)\n d = copy.deepcopy(depth)\n depth /= 1000.0\n depth = depth.reshape(-1)\n pts_in_cam = np.multiply(self.uv_one_in_cam, depth)\n pts_in_cam = np.concatenate((pts_in_cam, np.ones((1, pts_in_cam.shape[1]))), axis=0)\n pts = pts_in_cam[:3, :].T\n pts = np.dot(pts, rot.T)\n pts = pts + trans.reshape(-1)\n if self.backend == \"habitat\":\n ros_to_habitat_frame = np.array([[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]])\n pts = ros_to_habitat_frame.T @ pts.T\n pts = pts.T\n pts = transform_pose(pts, self.bot.get_base_state(\"odom\"))\n logging.info(\"Fetched all camera sensor input.\")\n return RGBDepth(rgb, d, pts)\n\n def dance(self):\n self.bot.dance()\n\n def turn(self, yaw):\n \"\"\"turns the bot by the yaw specified.\n\n Args:\n yaw: the yaw to execute in degree.\n \"\"\"\n turn_rad = yaw * math.pi / 180\n self.bot.go_to_relative([0, 0, turn_rad], close_loop=self.close_loop)\n\n def grab_nearby_object(self, bounding_box=[(240, 480), (100, 540)]):\n \"\"\"\n :param bounding_box: region in image to search for grasp\n \"\"\"\n return self.bot.grasp(bounding_box)\n\n def is_object_in_gripper(self):\n return self.bot.get_gripper_state() == 2\n\n def explore(self):\n return self.bot.explore()\n\n def drop(self):\n return self.bot.open_gripper()\n\n 
def get_obstacles_in_canonical_coords(self):\n \"\"\"get the positions of obtacles position in the canonical coordinate system\n instead of the Locobot's global coordinates as stated in the Locobot\n documentation: https://www.pyrobot.org/docs/navigation or\n https://github.com/facebookresearch/pyrobot/blob/master/docs/website/docs/ex_navigation.md\n\n the standard coordinate systems:\n Camera looks at (0, 0, 1),\n its right direction is (1, 0, 0) and\n its up-direction is (0, 1, 0)\n\n return:\n list[(x, z)] of the obstacle location in standard coordinates\n \"\"\"\n cordinates_in_robot_frame = self.bot.get_map()\n cordinates_in_standard_frame = [\n xyz_pyrobot_to_canonical_coords(list(c) + [0.0]) for c in cordinates_in_robot_frame\n ]\n cordinates_in_standard_frame = [(c[0], c[2]) for c in cordinates_in_standard_frame]\n return cordinates_in_standard_frame\n\n\nif __name__ == \"__main__\":\n base_path = os.path.dirname(__file__)\n parser = ArgumentParser(\"Locobot\", base_path)\n opts = parser.parse()\n mover = LoCoBotMover(ip=opts.ip, backend=opts.backend)\n if opts.check_controller:\n mover.check()\n"
] | [
[
"numpy.dot",
"numpy.abs",
"numpy.multiply",
"numpy.clip",
"numpy.linalg.inv",
"numpy.ones",
"numpy.sign",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
andrewseidl/ibis | [
"1468b8c4f96d9d58f6fa147a2579b0d9e5796186"
] | [
"ibis/pandas/execution/strings.py"
] | [
"import itertools\nimport operator\n\nfrom functools import reduce\n\nimport regex as re\n\nimport numpy as np\nimport pandas as pd\n\nimport toolz\n\nfrom pandas.core.groupby import SeriesGroupBy\n\nimport ibis\n\nimport ibis.expr.operations as ops\n\nfrom ibis.pandas.dispatch import execute_node\nfrom ibis.pandas.core import integer_types, scalar_types\n\n\n@execute_node.register(ops.StringLength, pd.Series)\ndef execute_string_length_series(op, data, **kwargs):\n return data.str.len().astype('int32')\n\n\n@execute_node.register(ops.Substring, pd.Series, integer_types, integer_types)\ndef execute_substring_int_int(op, data, start, length, **kwargs):\n return data.str[start:start + length]\n\n\n@execute_node.register(ops.Substring, pd.Series, pd.Series, integer_types)\ndef execute_substring_series_int(op, data, start, length, **kwargs):\n return execute_substring_series_series(\n op, data, start, pd.Series(np.repeat(length, len(start))), **kwargs)\n\n\n@execute_node.register(ops.Substring, pd.Series, integer_types, pd.Series)\ndef execute_string_substring_int_series(op, data, start, length, **kwargs):\n return execute_substring_series_series(\n op, data, pd.Series(np.repeat(start, len(length))), length, **kwargs)\n\n\n@execute_node.register(ops.Substring, pd.Series, pd.Series, pd.Series)\ndef execute_substring_series_series(op, data, start, length, **kwargs):\n end = start + length\n\n def iterate(value,\n start_iter=start.values.flat,\n end_iter=end.values.flat):\n begin = next(start_iter)\n end = next(end_iter)\n if (begin is not None and pd.isnull(begin)) or (\n end is not None and pd.isnull(end)):\n return None\n return value[begin:end]\n return data.map(iterate)\n\n\n@execute_node.register(ops.Strip, pd.Series)\ndef execute_string_strip(op, data, **kwargs):\n return data.str.strip()\n\n\n@execute_node.register(ops.LStrip, pd.Series)\ndef execute_string_lstrip(op, data, **kwargs):\n return data.str.lstrip()\n\n\n@execute_node.register(ops.RStrip, pd.Series)\ndef execute_string_rstrip(op, data, **kwargs):\n return data.str.rstrip()\n\n\n@execute_node.register(\n ops.LPad,\n pd.Series,\n (pd.Series,) + integer_types,\n (pd.Series, str)\n)\ndef execute_string_lpad(op, data, length, pad, **kwargs):\n return data.str.pad(length, side='left', fillchar=pad)\n\n\n@execute_node.register(\n ops.RPad,\n pd.Series,\n (pd.Series,) + integer_types,\n (pd.Series, str)\n)\ndef execute_string_rpad(op, data, length, pad, **kwargs):\n return data.str.pad(length, side='right', fillchar=pad)\n\n\n@execute_node.register(ops.Reverse, pd.Series)\ndef execute_string_reverse(op, data, **kwargs):\n return data.str[::-1]\n\n\n@execute_node.register(ops.Lowercase, pd.Series)\ndef execute_string_lower(op, data, **kwargs):\n return data.str.lower()\n\n\n@execute_node.register(ops.Uppercase, pd.Series)\ndef execute_string_upper(op, data, **kwargs):\n return data.str.upper()\n\n\n@execute_node.register(ops.Capitalize, pd.Series)\ndef execute_string_capitalize(op, data, **kwargs):\n return data.str.capitalize()\n\n\n@execute_node.register(ops.Repeat, pd.Series, (pd.Series,) + integer_types)\ndef execute_string_repeat(op, data, times, **kwargs):\n return data.str.repeat(times)\n\n\n@execute_node.register(\n ops.StringFind,\n pd.Series,\n (pd.Series, str),\n (pd.Series, type(None)) + integer_types,\n (pd.Series, type(None)) + integer_types,\n)\ndef execute_string_contains(op, data, needle, start, end, **kwargs):\n return data.str.find(needle, start, end)\n\n\ndef _sql_like_to_regex(pattern, escape):\n cur_i = 0\n 
pattern_length = len(pattern)\n\n while cur_i < pattern_length:\n nxt_i = cur_i + 1\n\n cur = pattern[cur_i]\n nxt = pattern[nxt_i] if nxt_i < pattern_length else None\n\n skip = 1\n\n if nxt is not None and escape is not None and cur == escape:\n yield nxt\n skip = 2\n elif cur == '%':\n yield '.*'\n elif cur == '_':\n yield '.'\n else:\n yield cur\n\n cur_i += skip\n\n\ndef sql_like_to_regex(pattern, escape=None):\n \"\"\"Convert a SQL LIKE pattern to an equivalent Python regular expression.\n\n Parameters\n ----------\n pattern : str\n A LIKE pattern with the following semantics:\n * ``%`` matches zero or more characters\n * ``_`` matches exactly one character\n * To escape ``%`` and ``_`` (or to match the `escape` parameter\n itself), prefix the desired character with `escape`.\n\n Returns\n -------\n new_pattern : str\n A regular expression pattern equivalent to the input SQL LIKE pattern.\n\n Examples\n --------\n >>> sql_like_to_regex('6%') # default is to not escape anything\n '^6.*$'\n >>> sql_like_to_regex('6^%', escape='^')\n '^6%$'\n >>> sql_like_to_regex('6_')\n '^6.$'\n >>> sql_like_to_regex('6/_', escape='/')\n '^6_$'\n >>> sql_like_to_regex('%abc') # any string ending with \"abc\"\n '^.*abc$'\n >>> sql_like_to_regex('abc%') # any string starting with \"abc\"\n '^abc.*$'\n \"\"\"\n return '^{}$'.format(''.join(_sql_like_to_regex(pattern, escape)))\n\n\n@execute_node.register(\n ops.StringSQLLike,\n pd.Series, str, (str, type(None))\n)\ndef execute_string_like_series_string(op, data, pattern, escape, **kwargs):\n new_pattern = re.compile(sql_like_to_regex(pattern, escape=escape))\n return data.map(\n lambda x, pattern=new_pattern: pattern.search(x) is not None\n )\n\n\n@execute_node.register(\n ops.StringSQLLike, SeriesGroupBy, str, str\n)\ndef execute_string_like_series_groupby_string(\n op, data, pattern, escape, **kwargs\n):\n return execute_string_like_series_string(\n op, data.obj, pattern, escape, **kwargs\n ).groupby(data.grouper.groupings)\n\n\n@execute_node.register(\n ops.GroupConcat,\n pd.Series, str, (pd.Series, type(None))\n)\ndef execute_group_concat_series_mask(\n op, data, sep, mask, aggcontext=None, **kwargs\n):\n return aggcontext.agg(data[mask] if mask is not None else data, sep.join)\n\n\n@execute_node.register(\n ops.GroupConcat, SeriesGroupBy, str, type(None)\n)\ndef execute_group_concat_series_gb(\n op, data, sep, _, aggcontext=None, **kwargs\n):\n return aggcontext.agg(\n data, lambda data, sep=sep: sep.join(data.astype(str)))\n\n\n@execute_node.register(\n ops.GroupConcat, SeriesGroupBy, str, SeriesGroupBy\n)\ndef execute_group_concat_series_gb_mask(\n op, data, sep, mask, aggcontext=None, **kwargs\n):\n method = lambda x, sep=sep: sep.join(x.astype(str)) # noqa: E731\n return aggcontext.agg(\n data,\n lambda data, mask=mask.obj, method=method: method(\n data[mask[data.index]]\n ),\n )\n\n\n@execute_node.register(ops.StringAscii, pd.Series)\ndef execute_string_ascii(op, data, **kwargs):\n return data.map(ord).astype('int32')\n\n\n@execute_node.register(ops.StringAscii, SeriesGroupBy)\ndef execute_string_ascii_group_by(op, data, **kwargs):\n return execute_string_ascii(\n op, data, **kwargs\n ).groupby(data.grouper.groupings)\n\n\n@execute_node.register(ops.RegexSearch, pd.Series, str)\ndef execute_series_regex_search(op, data, pattern, **kwargs):\n return data.map(\n lambda x, pattern=re.compile(pattern): pattern.search(x) is not None\n )\n\n\n@execute_node.register(ops.RegexSearch, SeriesGroupBy, str)\ndef execute_series_regex_search_gb(op, data, 
pattern, **kwargs):\n return execute_series_regex_search(\n op, data, getattr(pattern, 'obj', pattern), **kwargs\n ).groupby(data.grouper.groupings)\n\n\n@execute_node.register(\n ops.RegexExtract,\n pd.Series,\n (pd.Series, str),\n integer_types,\n)\ndef execute_series_regex_extract(op, data, pattern, index, **kwargs):\n def extract(x, pattern=re.compile(pattern), index=index):\n match = pattern.match(x)\n if match is not None:\n return match.group(index) or np.nan\n return np.nan\n\n extracted = data.apply(extract)\n return extracted\n\n\n@execute_node.register(\n ops.RegexExtract,\n SeriesGroupBy,\n str,\n integer_types,\n)\ndef execute_series_regex_extract_gb(op, data, pattern, index, **kwargs):\n return execute_series_regex_extract(\n op,\n data.obj,\n pattern,\n index,\n **kwargs\n ).groupby(data.grouper.groupings)\n\n\n@execute_node.register(\n ops.RegexReplace,\n pd.Series,\n str,\n str,\n)\ndef execute_series_regex_replace(op, data, pattern, replacement, **kwargs):\n def replacer(x, pattern=re.compile(pattern)):\n return pattern.sub(replacement, x)\n return data.apply(replacer)\n\n\n@execute_node.register(\n ops.RegexReplace,\n SeriesGroupBy,\n str,\n str,\n)\ndef execute_series_regex_replace_gb(op, data, pattern, replacement, **kwargs):\n return execute_series_regex_replace(\n data.obj,\n pattern,\n replacement,\n **kwargs\n ).groupby(data.grouper.groupings)\n\n\n@execute_node.register(ops.Translate, pd.Series, pd.Series, pd.Series)\ndef execute_series_translate_series_series(\n op, data, from_string, to_string, **kwargs\n):\n to_string_iter = iter(to_string)\n table = from_string.apply(\n lambda x, y: str.maketrans(x, y=next(y)), args=(to_string_iter,)\n )\n return data.str.translate(table)\n\n\n@execute_node.register(\n ops.Translate,\n pd.Series,\n pd.Series,\n str,\n)\ndef execute_series_translate_series_scalar(\n op, data, from_string, to_string, **kwargs\n):\n table = from_string.map(lambda x, y=to_string: str.maketrans(x=x, y=y))\n return data.str.translate(table)\n\n\n@execute_node.register(\n ops.Translate,\n pd.Series,\n str,\n pd.Series,\n)\ndef execute_series_translate_scalar_series(\n op, data, from_string, to_string, **kwargs\n):\n table = to_string.map(lambda y, x=from_string: str.maketrans(x=x, y=y))\n return data.str.translate(table)\n\n\n@execute_node.register(\n ops.Translate,\n pd.Series,\n str,\n str,\n)\ndef execute_series_translate_scalar_scalar(\n op, data, from_string, to_string, **kwargs\n):\n return data.str.translate(str.maketrans(from_string, to_string))\n\n\n@execute_node.register(\n ops.StrRight,\n pd.Series,\n integer_types,\n)\ndef execute_series_right(op, data, nchars, **kwargs):\n return data.str[-nchars:]\n\n\n@execute_node.register(ops.StrRight, SeriesGroupBy, integer_types)\ndef execute_series_right_gb(op, data, nchars, **kwargs):\n return execute_series_right(\n op, data.obj, nchars\n ).groupby(data.grouper.groupings)\n\n\n@execute_node.register(ops.StringJoin, (pd.Series, str), list)\ndef execute_series_join_scalar_sep(op, sep, data, **kwargs):\n return reduce(lambda x, y: x + sep + y, data)\n\n\ndef haystack_to_series_of_lists(haystack, index=None):\n if index is None:\n index = toolz.first(\n piece.index for piece in haystack if hasattr(piece, 'index')\n )\n pieces = reduce(\n operator.add,\n (\n pd.Series(getattr(piece, 'values', piece), index=index).map(\n ibis.util.promote_list\n ) for piece in haystack\n )\n )\n return pieces\n\n\n@execute_node.register(ops.FindInSet, pd.Series, list)\ndef execute_series_find_in_set(op, needle, 
haystack, **kwargs):\n pieces = haystack_to_series_of_lists(haystack, index=needle.index)\n return pieces.map(\n lambda elements, needle=needle, index=itertools.count(): (\n ibis.util.safe_index(elements, needle.iat[next(index)])\n )\n )\n\n\n@execute_node.register(ops.FindInSet, SeriesGroupBy, list)\ndef execute_series_group_by_find_in_set(op, needle, haystack, **kwargs):\n pieces = [getattr(piece, 'obj', piece) for piece in haystack]\n return execute_series_find_in_set(\n op, needle.obj, pieces, **kwargs\n ).groupby(needle.grouper.groupings)\n\n\n@execute_node.register(ops.FindInSet, scalar_types, list)\ndef execute_string_group_by_find_in_set(op, needle, haystack, **kwargs):\n # `list` could contain series, series groupbys, or scalars\n # mixing series and series groupbys is not allowed\n series_in_haystack = [\n type(piece) for piece in haystack\n if isinstance(piece, (pd.Series, SeriesGroupBy))\n ]\n\n if not series_in_haystack:\n return ibis.util.safe_index(haystack, needle)\n\n try:\n collection_type, = frozenset(map(type, series_in_haystack))\n except ValueError:\n raise ValueError('Mixing Series and SeriesGroupBy is not allowed')\n\n pieces = haystack_to_series_of_lists([\n getattr(piece, 'obj', piece) for piece in haystack\n ])\n\n result = pieces.map(toolz.flip(ibis.util.safe_index)(needle))\n if issubclass(collection_type, pd.Series):\n return result\n\n assert issubclass(collection_type, SeriesGroupBy)\n\n return result.groupby(\n toolz.first(\n piece.grouper.groupings\n for piece in haystack if hasattr(piece, 'grouper')\n )\n )\n"
] | [
[
"pandas.isnull"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
UMBCvision/Contextual-Adversarial-Patches | [
"602fd267c2562f45ba65d10edb856a1144b8ca5f"
] | [
"dataset.py"
] | [
"#!/usr/bin/python\n# encoding: utf-8\n\nimport os\nimport random\nimport torch\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nfrom utils import read_truths_args, read_truths\nfrom image import *\n\nimport pdb\n\n\n# WARNING: for physical world attack\nclass listDataset(Dataset):\n\n def __init__(self, root, shape=None, shuffle=True, transform=None, target_transform=None, train=False, seen=0, batch_size=64, num_workers=4):\n with open(root, 'r') as file:\n self.lines = file.readlines()\n\n if shuffle:\n random.shuffle(self.lines)\n\n self.nSamples = len(self.lines)\n self.transform = transform\n self.target_transform = target_transform\n self.train = train\n self.shape = shape\n self.seen = seen\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n def __len__(self):\n return self.nSamples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n imgpath = self.lines[index].rstrip()\n ''' Fix the width to be 13*32=416 and do not randomize\n '''\n width = 13*32\n\n\n if self.train and index % 64== 0:\n if self.seen < 4000*64:\n width = 13*32\n self.shape = (width, width)\n elif self.seen < 8000*64:\n width = (random.randint(0,3) + 13)*32\n self.shape = (width, width)\n elif self.seen < 12000*64:\n width = (random.randint(0,5) + 12)*32\n self.shape = (width, width)\n elif self.seen < 16000*64:\n width = (random.randint(0,7) + 11)*32\n self.shape = (width, width)\n else: # self.seen < 20000*64:\n width = (random.randint(0,9) + 10)*32\n self.shape = (width, width)\n\n if self.train:\n jitter = 0.2\n hue = 0.1\n saturation = 1.5\n exposure = 1.5\n\n img, label = load_data_detection(imgpath, self.shape, jitter, hue, saturation, exposure)\n label = torch.from_numpy(label)\n else:\n img = Image.open(imgpath).convert('RGB')\n if self.shape:\n img = img.resize(self.shape)\n\n labpath = imgpath.replace('images', 'labels').replace('JPEGImages', 'labels').replace('.jpg', '.txt').replace('.png','.txt')\n # # for KITTI\n # labpath = imgpath.replace('images', 'labels').replace('PNGImages_cropped', 'labels_cropped_car_person').replace('.jpg', '.txt').replace('.png','.txt')\n #labpath = imgpath.replace('images', 'labels').replace('train', 'labels').replace('.jpg', '.txt').replace('.png','.txt')\n label = torch.zeros(50*5)\n try:\n tmp = torch.from_numpy(read_truths_args(labpath, 8.0/img.width).astype('float32'))\n except Exception:\n tmp = torch.zeros(1,5)\n #tmp = torch.from_numpy(read_truths(labpath))\n\n # # for KITTI\n # if tmp.size() == 0:\n # tmp = torch.zeros(1,5)\n tmp = tmp.view(-1)\n tsz = tmp.numel()\n # print('labpath = %s , tsz = %d' % (labpath, tsz))\n if tsz > 50*5:\n label = tmp[0:50*5]\n elif tsz > 0:\n label[0:tsz] = tmp\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n label = self.target_transform(label)\n\n self.seen = self.seen + self.num_workers\n return (img, label, imgpath)\n"
] | [
[
"torch.from_numpy",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MuhamedAbdalla/Automatic-Audio-Book-Based-On-Emotion-Detection | [
"72130ad037b900461af5be6d80b27ab29c81de5e"
] | [
"backend/microservices/TTS/VC/vocoder/vocoder_dataset.py"
] | [
"from torch.utils.data import Dataset\r\nfrom pathlib import Path\r\nfrom VC.vocoder import audio\r\nimport VC.vocoder.hparams as hp\r\nimport numpy as np\r\nimport torch\r\n\r\n\r\nclass VocoderDataset(Dataset):\r\n def __init__(self, metadata_fpath: Path, mel_dir: Path, wav_dir: Path):\r\n print(\"Using inputs from:\\n\\t%s\\n\\t%s\\n\\t%s\" % (metadata_fpath, mel_dir, wav_dir))\r\n \r\n with metadata_fpath.open(\"r\") as metadata_file:\r\n metadata = [line.split(\"|\") for line in metadata_file]\r\n \r\n gta_fnames = [x[1] for x in metadata if int(x[4])]\r\n gta_fpaths = [mel_dir.joinpath(fname) for fname in gta_fnames]\r\n wav_fnames = [x[0] for x in metadata if int(x[4])]\r\n wav_fpaths = [wav_dir.joinpath(fname) for fname in wav_fnames]\r\n self.samples_fpaths = list(zip(gta_fpaths, wav_fpaths))\r\n \r\n print(\"Found %d samples\" % len(self.samples_fpaths))\r\n \r\n def __getitem__(self, index): \r\n mel_path, wav_path = self.samples_fpaths[index]\r\n \r\n # Load the mel spectrogram and adjust its range to [-1, 1]\r\n mel = np.load(mel_path).T.astype(np.float32) / hp.mel_max_abs_value\r\n \r\n # Load the wav\r\n wav = np.load(wav_path)\r\n if hp.apply_preemphasis:\r\n wav = audio.pre_emphasis(wav)\r\n wav = np.clip(wav, -1, 1)\r\n \r\n # Fix for missing padding # TODO: settle on whether this is any useful\r\n r_pad = (len(wav) // hp.hop_length + 1) * hp.hop_length - len(wav)\r\n wav = np.pad(wav, (0, r_pad), mode='constant')\r\n assert len(wav) >= mel.shape[1] * hp.hop_length\r\n wav = wav[:mel.shape[1] * hp.hop_length]\r\n assert len(wav) % hp.hop_length == 0\r\n \r\n # Quantize the wav\r\n if hp.voc_mode == 'RAW':\r\n if hp.mu_law:\r\n quant = audio.encode_mu_law(wav, mu=2 ** hp.bits)\r\n else:\r\n quant = audio.float_2_label(wav, bits=hp.bits)\r\n elif hp.voc_mode == 'MOL':\r\n quant = audio.float_2_label(wav, bits=16)\r\n \r\n return mel.astype(np.float32), quant.astype(np.int64)\r\n\r\n def __len__(self):\r\n return len(self.samples_fpaths)\r\n \r\n \r\ndef collate_vocoder(batch):\r\n mel_win = hp.voc_seq_len // hp.hop_length + 2 * hp.voc_pad\r\n max_offsets = [x[0].shape[-1] -2 - (mel_win + 2 * hp.voc_pad) for x in batch]\r\n mel_offsets = [np.random.randint(0, offset) for offset in max_offsets]\r\n sig_offsets = [(offset + hp.voc_pad) * hp.hop_length for offset in mel_offsets]\r\n\r\n mels = [x[0][:, mel_offsets[i]:mel_offsets[i] + mel_win] for i, x in enumerate(batch)]\r\n\r\n labels = [x[1][sig_offsets[i]:sig_offsets[i] + hp.voc_seq_len + 1] for i, x in enumerate(batch)]\r\n\r\n mels = np.stack(mels).astype(np.float32)\r\n labels = np.stack(labels).astype(np.int64)\r\n\r\n mels = torch.tensor(mels)\r\n labels = torch.tensor(labels).long()\r\n\r\n x = labels[:, :hp.voc_seq_len]\r\n y = labels[:, 1:]\r\n\r\n bits = 16 if hp.voc_mode == 'MOL' else hp.bits\r\n\r\n x = audio.label_2_float(x.float(), bits)\r\n\r\n if hp.voc_mode == 'MOL' :\r\n y = audio.label_2_float(y.float(), bits)\r\n\r\n return x, y, mels"
] | [
[
"numpy.pad",
"numpy.clip",
"numpy.stack",
"torch.tensor",
"numpy.load",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
crisgompec/ImbalancedDataset | [
"a758a740689d010180c77cc4e977c810d2a360ca"
] | [
"Dataset2/classifiers.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.model_selection import train_test_split\n\n\nimport pandas as pd\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nimport random\n\n\"\"\"\nClassifiers Library\nThis library includes 5 different classifiers:\n Bagging\n Adaboost\n AdaCost\n BoostingSVM\n AdaMEC\n\"\"\"\n\n\n\"\"\" Custom Bagging implementation\"\"\"\nclass Bagging_classifier:\n def __init__(self, type_classifier, x=None, y=None, n_iterations = 20, ratio=0.1):\n \n \"\"\"\n type_classifier: DecisionTree, KNN, SVM, GaussianNB\n \"\"\"\n self.number_iterations = n_iterations\n self.type_classifier = type_classifier \n \n self.ratio = ratio #Ratio Bootstrapped dataset/ original dataset\n \n \n def fit(self,X_train,y_train):\n \n N = np.shape(X_train)[0]\n \n dataset_train = np.concatenate((X_train, y_train.reshape((N,1))),axis = 1)\n \n # There will be as many classifier models as iterations\n self.classifier_models = np.zeros(shape=self.number_iterations, dtype=object)\n \n for classifier_iteration in range(self.number_iterations):\n \n dataset_train_undersampled = dataset_train[random.sample(range(1,N),int(self.ratio*N)), :]\n\n X_train_undersampled = dataset_train_undersampled[:,0:58]\n y_train_undersampled = dataset_train_undersampled[:,58].astype(int)\n\n\n ### Train different algorithms\n\n # Decision tree\n if self.type_classifier == \"DecisionTree\": \n classifier = DecisionTreeClassifier(max_depth=1, max_leaf_nodes=2)\n classifier_model = classifier.fit(X_train_undersampled, y_train_undersampled)\n\n\n # K-NN\n elif self.type_classifier == \"KNN\":\n classifier = KNeighborsClassifier(n_neighbors=3)\n classifier_model = classifier.fit(X_train_undersampled, y_train_undersampled)\n\n \n # SVM\n elif self.type_classifier == \"SVM\":\n classifier = make_pipeline(StandardScaler(), SVC(gamma='auto'))\n classifier_model = classifier.fit(X_train_undersampled, y_train_undersampled)\n\n # Gaussian Naive Bayes \n elif self.type_classifier == \"GaussianNB\":\n classifier = GaussianNB()\n classifier_model = classifier.fit(X_train_undersampled, y_train_undersampled) \n else:\n print(\"Wrong classifier selection\")\n return\n \n self.classifier_models[classifier_iteration] = classifier_model\n \n return\n \n \n def predict(self,X_test):\n \n model_preds = np.array([model.predict(X_test) for model in self.classifier_models])\n y_test_pred = np.ceil(np.mean(model_preds,axis = 0))\n y_test_pred = np.where(y_test_pred < 0.5, -1, 1)\n return y_test_pred.astype(int)\n \n\n\"\"\" Custom AdaBoost Classifier implementation\"\"\"\nclass AdaboostClassifier:\n def __init__(self, x=None,y=None, n_iterations = 100):\n self.number_iterations = n_iterations\n \n def fit(self,X_train,y_train):\n number_samples = np.shape(X_train)[0]\n weights = np.ones(number_samples)/number_samples\n \n # There will be as many weak learners as iterations\n self.weak_learners = np.zeros(shape=self.number_iterations, dtype=object)\n self.significance_vec = np.zeros(shape=self.number_iterations)\n self.loss_vec = []\n #accuracy_vec = []\n self.error_vec = []\n \n for iterations in 
range(self.number_iterations):\n current_weights = weights\n\n weak_learner = DecisionTreeClassifier(max_depth=1, max_leaf_nodes=2)\n weak_learner_model = weak_learner.fit(X_train, y_train, sample_weight=current_weights)\n\n # The new weak learner model is saved\n self.weak_learners[iterations] = weak_learner_model\n weak_learner_pred = weak_learner_model.predict(X_train)\n\n error = 0\n incorrect_pred = 0\n correct_pred = 0\n for item_index in range(number_samples):\n if weak_learner_pred[item_index] != y_train[item_index]:\n incorrect_pred = incorrect_pred + 1\n error = error + current_weights[item_index]\n else: \n correct_pred = correct_pred + 1 \n\n # Save error for plotting \n self.error_vec.append(error)\n \n # Save loss\n self.loss_vec.append(incorrect_pred)\n\n # Significance of the weak learner model is calculated and saved\n significance = 0.5*np.log((1-error)/error) \n self.significance_vec[iterations] = significance\n\n # Update weights for each sample\n for item_index in range(number_samples):\n if weak_learner_pred[item_index] != y_train[item_index]:\n weights[item_index] = np.multiply(current_weights[item_index],np.exp(significance))\n else:\n weights[item_index] = current_weights[item_index]*np.exp(-significance)\n\n # Normalize weights\n weights /= weights.sum()\n \n \n def predict(self,X_test):\n model_preds = np.array([model.predict(X_test) for model in self.weak_learners])\n y_test_pred = np.sign(np.dot(self.significance_vec, model_preds))\n return y_test_pred.astype(int)\n\n\"\"\" Custom AdaCost Classifier implementation\"\"\"\nclass AdaCost:\n \"\"\"This is the implementation of the AdaCost Classifier. In this algorithm, \n the weight update is modified by adding a cost adjustment function φ. \n This function, for an instance with a higher cost factor increases its weight \n “more” if the instance is misclassified, but decreases its weight “less” otherwise.\n \n This implementation uses the function φ = +/-0.5*cost + 0.5\n \n Requires:\n X_train: Training features. Size N x D\n y_train: Training labels. 
Size N x 1\n cost: cost used to update weight via te adjustment function\n\n \"\"\"\n \n def __init__(self, x=None,y=None, cost = None, n_iterations = 100):\n self.number_iterations = n_iterations\n if x != None and y != None and cost != None:\n self.fit(x,y,cost)\n \n def fit(self,X_train,y_train, cost):\n # Initialize weights\n number_samples = len(X_train)\n weights = np.ones(number_samples)/number_samples\n \n # Define adjustment function φ (called beta)\n beta = 0.5*np.ones(number_samples)\n beta[np.where(y_train==1)[0]] += cost*0.5\n beta[np.where(y_train==-1)[0]] -= cost*0.5\n \n # Define vectors to store weak predictors and significances of each iteration\n self.weak_learners = np.zeros(shape=self.number_iterations, dtype=object)\n self.significance_vec = np.zeros(shape=self.number_iterations)\n \n self.error_vec = []\n self.loss_vec = []\n \n for it in range(self.number_iterations):\n current_weights = weights\n \n # Create and save week learner for this iteration\n weak_learner_model = DecisionTreeClassifier(max_depth=1, \n max_leaf_nodes=2).fit(X_train, y_train, sample_weight=current_weights)\n self.weak_learners[it] = weak_learner_model\n weak_learner_pred = weak_learner_model.predict(X_train)\n \n \n # Calculate error \n \n error = 0\n incorrect_pred = 0\n correct_pred = 0\n for item_index in range(number_samples):\n if weak_learner_pred[item_index] != y_train[item_index]:\n incorrect_pred = incorrect_pred + 1\n error = error + current_weights[item_index]\n else: \n correct_pred = correct_pred + 1 \n\n \n # Save error for plotting \n self.error_vec.append(error)\n \n # Save loss\n self.loss_vec.append(incorrect_pred)\n \n # Calculate r\n u = np.multiply(np.multiply(weak_learner_pred, y_train),beta)\n r = np.sum(np.multiply(weights,u))\n \n # Calculate and store significance of this iteration\n significance = 0.5 * np.log((1+r)/(1-r))\n self.significance_vec[it] = significance\n \n # Update weights for next iteration\n weights = np.multiply(weights,np.exp(-significance * u))\n weights /= weights.sum() \n \n # Round Debugging\n #print('Round %d' % (it))\n #print(u)\n #print(r)\n #print(significance)\n #print(weights)\n \n #input()\n\n \n def predict(self,X_test):\n model_preds = np.array([model.predict(X_test) for model in self.weak_learners])\n y_test_pred = np.sign(np.dot(self.significance_vec, model_preds))\n return y_test_pred.astype(int)\n\n\"\"\" Custom BoostingSVM Classifier implementation\"\"\"\nclass BoostingSVM:\n def __init__(self, n_estimators = 100, x=None,y=None, sigma_ini = 50, sigma_min = 10, sigma_step = 0.5):\n \"\"\"\n This is a custom implementation of the Boosting SVM algorithm. It is based \n on the combination of Adaboost and RFB SVM weak learners. To create the model,\n this class needs the following inputs:\n \n X_train: Training features. Size N x D\n y_train: Training labels. 
Size N x 1\n\n \"\"\"\n self.sigma_ini = sigma_ini\n self.sigma_step = sigma_step\n self.sigma_min = sigma_min\n self.number_iterations = n_estimators\n \n def fit(self,X_train,y_train):\n sigma = self.sigma_ini \n \n # Initialize weights\n number_samples = np.shape(X_train)[0]\n weights = np.ones(number_samples)/number_samples\n \n # Define vectors to store weak predictors and significances of each iteration\n self.weak_learners = [] #np.zeros(shape=self.number_iterations, dtype=object)\n self.significance_vec = [] #np.zeros(shape=self.number_iterations)\n self.error_debug = []\n \n # Todo: Apply dimensionality reduction\n self.pca = PCA(n_components = 2).fit(X_train)\n self.variance_explained = self.pca.explained_variance_ratio_\n X_train = self.pca.fit_transform(X_train)\n \n self.error_vec = []\n \n iteration = 0\n \n #for iterations in range(self.number_iterations):\n while sigma > self.sigma_min and iteration < self.number_iterations:\n print('Sigma: %.1f' % sigma)\n #print('BoostSVM iteration: %d' % (iterations))\n current_weights = weights\n \n # Create and save week learner for this iteration\n weak_learner = SVC(kernel='rbf', gamma = 1/2/sigma**2) #SVC(max_iter=10,tol=5)\n weak_learner_model = weak_learner.fit(X_train, y_train, sample_weight=current_weights)\n\n # The new weak learner model is saved\n weak_learner_pred = weak_learner_model.predict(X_train)\n \n # Calculate error\n error = np.sum(current_weights[np.where(weak_learner_pred != y_train)[0]]) \n self.error_debug.append(error)\n \n # Save error for plotting \n self.error_vec.append(error)\n \n if error > 0.5:\n sigma = sigma - self.sigma_step\n else:\n # Significance of the weak learner model is calculated and saved\n significance = 0.5*np.log((1-error)/error) \n self.significance_vec.append(significance)\n self.weak_learners.append(weak_learner_model)\n\n # Update weights for each sample\n idx_incorrect = np.where(weak_learner_pred != y_train)[0]\n idx_correct = np.where(weak_learner_pred == y_train)[0]\n weights[idx_incorrect] = np.multiply(current_weights[idx_incorrect],np.exp(significance))\n weights[idx_correct] = current_weights[idx_correct]*np.exp(-significance)\n\n # Normalize weights\n weights /= weights.sum()\n \n iteration = iteration + 1\n \n def predict(self,X_test):\n X_test_pca = self.pca.fit_transform(X_test)\n model_preds = np.array([model.predict(X_test_pca) for model in self.weak_learners])\n y_test_pred = np.sign(np.dot(self.significance_vec, model_preds))\n return y_test_pred.astype(int)\n\n\"\"\" AdaMEC Classifier implementation\nBased on:\nNikolaou, N., Edakunni, N. U., Kull, M., Flach, P. A., and Brown, G., \n'Cost-sensitive boosting algorithms: Do we really need them?', \nMachine Learning, 104(2), pages 359-384, 2016. 
\n[http://link.springer.com/article/10.1007/s10994-016-5572-x]\n\"\"\"\nclass AdaMEC:\n def __init__(self, x=None,y=None, n_iterations = 100):\n self.number_iterations = n_iterations\n \n def fit(self, X_train, y_train):\n X_train, X_cal, y_train, y_cal = train_test_split(X_train, y_train, test_size=0.5)\n \n #Train an AdaBoost ensemble\n AdaBoostUncal = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), n_estimators= self.number_iterations)\n AdaBoostUncal = AdaBoostUncal.fit(X_train, y_train)\n\n #Now calibrate the ensemble on the data reserved for calibration\n self.AdaBoostCal = CalibratedClassifierCV(AdaBoostUncal, cv=\"prefit\", method='sigmoid')\n self.AdaBoostCal.fit(X_cal, y_cal)\n \n# self.error_vec = self.estimator_errors_\n\n return self.AdaBoostCal\n\n def predict(self, threshold, X_test):\n scores_CalibratedAdaMEC = self.AdaBoostCal.predict(X_test)[:,1] #Positive Class scores\n y_pred_CalibratedAdaMEC = -np.ones(X_test.shape[0])\n y_pred_CalibratedAdaMEC[np.where(scores_CalibratedAdaMEC > threshold)] = 1#Classifications, AdaMEC uses a shifted decision threshold (skew-sensitive) \n\n return y_pred_CalibratedAdaMEC\n\n"
] | [
[
"numpy.dot",
"numpy.log",
"sklearn.naive_bayes.GaussianNB",
"numpy.multiply",
"sklearn.decomposition.PCA",
"sklearn.model_selection.train_test_split",
"numpy.ones",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.tree.DecisionTreeClassifier",
"numpy.shape",
"numpy.mean",
"sklearn.svm.SVC",
"sklearn.calibration.CalibratedClassifierCV",
"sklearn.preprocessing.StandardScaler",
"numpy.exp",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BrandonGower-Winter/ABM-gecco2022 | [
"2178c91397011bb11453b0c7f0252f9c9aacca6e"
] | [
"src/main_vis.py"
] | [
"# Note! This script will only work if the agents have the same attributes\nimport math\n\nimport Animate\nimport json\nimport numpy as np\nimport os\nimport pandas\nimport statistics\nimport matplotlib.pyplot as plt\n\nfrom ECAgent.Environments import discreteGridPosToID\nfrom CythonFunctions import CAgentUtilityFunctions\n\n\ndef get_json_iteration(filename: str) -> int:\n return int(filename[filename.index('_')+1:-5])\n\n\ndef get_csv_iteration(filename: str) -> int:\n return int(filename[filename.index('_')+1:-4])\n\n\ndef load_json_file(filename: str):\n with open(filename) as json_file:\n return json.load(json_file)\n\n\ndef load_json_files(folder_path: str, sort: bool = True, key=get_json_iteration) -> []:\n\n json_snapshots = []\n\n for root, _, files in os.walk(folder_path, topdown=True):\n\n json_files = [f for f in files if f[-4:] == 'json']\n if sort:\n json_files.sort(key=key)\n\n for file in json_files:\n with open(os.path.join(root, file)) as json_file:\n json_snapshots.append(json.load(json_file))\n\n return json_snapshots\n\n\ndef load_csv(filename: str) -> pandas.DataFrame:\n return pandas.read_csv(filename)\n\n\ndef load_csvs(folder_path: str, sort: bool = True, key=get_csv_iteration) -> [pandas.DataFrame]:\n\n pandas_snapshots = []\n\n for root, _, files in os.walk(folder_path, topdown=True):\n\n csv_files = [f for f in files if f[-3:] == 'csv']\n\n if sort:\n csv_files.sort(key=key)\n\n for file in csv_files:\n pandas_snapshots.append(load_csv(os.path.join(root, file)))\n\n return pandas_snapshots\n\n\ndef get_all_keys(item: dict) -> [str]:\n return [key for key in item]\n\n\ndef reformat_snapshots_to_per_entity_dicts(snapshots: [[dict]], id_str: str = 'id', filter: [str] = None):\n agent_data = {}\n\n if filter is None:\n filter = get_all_keys(snapshots[0][0])\n filter.remove(id_str)\n\n for i in range(len(snapshots)):\n\n for agent in snapshots[i]:\n a_id = agent[id_str]\n if a_id in agent_data:\n for prop in filter:\n agent_data[a_id][prop].append(agent[prop])\n agent_data[a_id]['iterations'].append(i)\n else:\n agent_data[a_id] = {}\n for prop in filter:\n agent_data[a_id][prop] = [agent[prop]]\n agent_data[a_id]['iterations'] = [i]\n\n return agent_data\n\n\ndef generate_composite_val(props: [str], snapshot: dict, comp_func, sort: bool = False):\n\n if len(props) > 1:\n ls = [[agent[prop] for prop in props] for agent in snapshot]\n else:\n ls = [agent[props[0]] for agent in snapshot]\n\n if len(ls) == 0:\n return 0\n\n if sort:\n ls.sort()\n\n return comp_func(ls)\n\n\ndef get_composite_property_as_dict(snapshots: [[dict]], props: [str], comp_funcs: [(str, any)],\n over_range: (int, int) = (0, -1), sort: bool = False) -> dict:\n\n prop_dict = {'iterations': []}\n\n over_range = over_range if over_range[1] != -1 else (over_range[0], len(snapshots))\n\n for i in range(over_range[0], over_range[1]):\n for func in comp_funcs:\n\n val = generate_composite_val(props, snapshots[i], func[1], sort)\n\n if func[0] in prop_dict:\n prop_dict[func[0]].append(val)\n else:\n prop_dict[func[0]] = [val]\n\n prop_dict['iterations'].append(i)\n\n return prop_dict\n\n\ndef create_composite_property_as_panda(snapshots: [pandas.DataFrame], func, kwargs_to_pass: dict = {}):\n\n for snapshot in snapshots:\n func(snapshot, **kwargs_to_pass)\n\n\ndef generate_plot_from_dict(title: str, data: dict, filename: str, filter: [str] = None, y_label: str = '',\n x_label: str = 'Iterations', legend: str = None) -> None:\n\n fig, ax = plt.subplots()\n ax.set_title(title)\n ax.set_xlabel(x_label)\n 
ax.set_ylabel(y_label)\n\n if filter is None:\n filter = get_all_keys(data)\n filter.remove('iterations')\n\n for prop in filter:\n ax.plot(data['iterations'], data[prop], label=prop)\n\n if legend is not None:\n ax.legend(loc=legend)\n\n ax.set_aspect('auto')\n\n fig.savefig(filename)\n\n\ndef generate_plot_from_entity_dicts(title: str, data: dict, property: str, filename: str, y_label: str = '',\n x_label: str = 'Iterations', legend: str = None):\n\n fig, ax = plt.subplots()\n ax.set_title(title)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n\n for e_id in data:\n ax.plot(data[e_id]['iterations'], data[e_id][property], label=e_id)\n\n if legend is not None:\n ax.legend(loc=legend)\n\n ax.set_aspect('auto')\n\n fig.savefig(filename)\n\n\ndef pandas_to_plot(title: str, width: int, height: int, data: [pandas.DataFrame], property:str, filename:str,\n slices: [int] = None, x_label: str = 'X', y_label: str = 'Y', vmin: int = 0, vmax: int = 100000) -> None:\n\n # Generate the pix map\n for i in slices:\n fig, ax = plt.subplots()\n ax.set_title(title)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n plot = ax.imshow(data[i][property].to_numpy().reshape(height, width), interpolation='none', cmap='jet', vmin=vmin, vmax=vmax)\n fig.colorbar(plot)\n fig.savefig(filename + '_{}.png'.format(i))\n\n\ndef pandas_to_animat(title: str, width: int, height: int, data: [pandas.DataFrame], property: str, filename: str,\n fps: int = 1, x_label: str = 'X', y_label: str = 'Y', vmin: int = 0, vmax: int = 100000) -> None:\n\n records = [df[property].to_numpy().reshape(height, width) for df in data]\n Animate.generateAnimat(title, records, fps, vmin, vmax, filename, x_label, y_label)\n\n\ndef log_file_to_list(file_path: str) -> [dict]:\n\n log_list = []\n\n with open(file_path, 'r') as log_file:\n iter_dict = {}\n\n for line in log_file.readlines():\n keyword = line[:line.find(':')]\n\n if keyword == 'ITERATION':\n log_list.append(iter_dict)\n iter_dict = {}\n elif keyword == 'GES':\n vals = str.split(line[line.find(':')+1:])\n iter_dict['temp'] = float(vals[0])\n iter_dict['rainfall'] = float(vals[1])\n elif keyword == 'HOUSEHOLD.FARM':\n if 'HOUSEHOLD.FARM' not in iter_dict:\n iter_dict['HOUSEHOLD.FARM'] = 0\n iter_dict['FARM_LOC'] = []\n\n iter_dict['HOUSEHOLD.FARM'] += 1\n\n vals = str.split(line[line.find(':')+1:])\n iter_dict['FARM_LOC'].append(int(vals[1]))\n\n elif keyword in iter_dict:\n iter_dict[keyword] += 1\n else:\n iter_dict[keyword] = 1\n\n return log_list\n\n\ndef generate_plot_from_log_list(title: str, data: [dict], filename: str, filter: [str], y_label: str = '', default_val = 0,\n x_label: str = 'Iterations', legend: str = None) -> None:\n\n fig, ax = plt.subplots()\n ax.set_title(title)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n\n plot_content = {}\n\n for property in filter:\n plot_content[property] = []\n\n for i in range(len(data)):\n for property in filter:\n if property in data[i]:\n plot_content[property].append(data[i][property])\n else:\n plot_content[property].append(default_val)\n\n for property in filter:\n ax.plot(np.arange(len(data)), plot_content[property], label=property)\n\n if legend is not None:\n ax.legend(loc=legend)\n\n ax.set_aspect('auto')\n\n fig.savefig(filename)\n\n\ndef gini(x):\n\n # Mean Absolute Difference\n mad = np.abs(np.subtract.outer(x, x)).mean()\n # Relative Mean Absolute difference\n rmad = mad / np.mean(x)\n\n return 0.5 * rmad\n\n\ndef land_possesion(df : pandas.DataFrame, **kwargs):\n\n land = []\n\n for i in range(len(df)):\n if 
df['isSettlement'][i] != -1:\n land.append(2)\n elif df['isOwned'][i] != -1:\n land.append(1)\n else:\n land.append(0)\n\n df['land_ownership'] = land\n\n\ndef xtent_map(settlement_data : [], pixels):\n\n ret_data = []\n pos_data = []\n # Generating positions:\n for y in range(parser.height):\n for x in range(parser.width):\n pos_data.append((x, y))\n\n count = 1\n for it_set in settlement_data:\n print('Iteration: {}'.format(count))\n it_data = []\n for y in range(parser.height):\n it_row = []\n for x in range(parser.width):\n\n ws = np.zeros((len(it_set)))\n ds = np.zeros((len(it_set)))\n\n for i in range(len(it_set)):\n\n ws[i] = it_set[i]['wealth'] + it_set[i]['load']\n n_x = pos_data[\n it_set[i]['pos'][0]\n ][0]\n n_y = pos_data[it_set[i]['pos'][0]][1]\n ds[i] = math.sqrt(((x - n_x) ** 2) + ((y - n_y) ** 2))\n\n ds = ds * 2000 # Cell Size\n dst = CAgentUtilityFunctions.xtent_distribution(ws, ds, 0.75, 1.5)\n\n iSettlement = np.argmax(dst)\n\n it_row.append(it_set[iSettlement]['id']+1 if dst[iSettlement] > 0.0 else 0)\n\n it_data.append(it_row)\n\n ret_data.append(it_data)\n count += 1\n\n return ret_data\n\n\ndef get_dict_by_id(l : [], id: int):\n for item in l:\n if item['id'] == id:\n return item\n return None\n\n\ndef xtent_to_property(xtent_arr: [], settlement_data: [], property: str, modifier = 0.0):\n to_ret = []\n\n for z in range(len(xtent_arr)):\n map = []\n for y in range(len(xtent_arr[z])):\n row = []\n for x in range(len(xtent_arr[z][y])):\n if xtent_arr[z][y][x] != 0:\n settlement = get_dict_by_id(settlement_data[z], int(xtent_arr[z][y][x] - 1))\n if settlement is not None and 'belief_space' in settlement:\n row.append(settlement['belief_space'][property] + modifier)\n else:\n row.append(0.0)\n else:\n row.append(0.0)\n map.append(row)\n to_ret.append(map)\n\n return to_ret\n\n\ndef household_social_status_weighted_mean(data: []):\n\n total_social_status = sum([point[1] + point[2] for point in data])\n return sum([point[0] * (point[1] + point[2]) / total_social_status for point in data])\n\n\ndef bin01(data: []):\n\n counts = [0 for _ in range(10)]\n\n for val in data:\n index = int(max(min(math.ceil(val * 10) - 1, 9), 0))\n counts[index] += 1\n\n return [p / float(len(data)) for p in counts]\n\n\ndef generate_household_plots(parser):\n agent_snapshots = load_json_files(parser.path + '/agents')\n\n if not os.path.isdir(parser.path + '/agent_plots'):\n os.mkdir(parser.path + '/agent_plots')\n\n population_dict = get_composite_property_as_dict(agent_snapshots, ['resources'],\n [('mean', statistics.mean),\n ('median', statistics.median),\n ('min', min),\n ('max', max),\n ('total', sum),\n ('gini', gini)], sort=True)\n\n household = get_composite_property_as_dict(agent_snapshots, ['occupants'],\n [('mean', statistics.mean),\n ('median', statistics.median),\n ('min', min),\n ('max', max),\n ('total', sum)], sort=True)\n\n generate_plot_from_dict('Summary of Household Resources over 1000 years', population_dict,\n parser.path + '/agent_plots/resource_summary.png',\n filter=['mean', 'median', 'min', 'max'],\n y_label='Resources (KG)', legend='center left')\n\n generate_plot_from_dict('Total Household Resources over 1000 years', population_dict,\n parser.path + '/agent_plots/resource_total.png',\n filter=['total'],\n y_label='Resources (KG)', legend='center right')\n\n generate_plot_from_dict('Summary of Household Population over 1000 years', household,\n parser.path + '/agent_plots/population_summary.png',\n filter=['mean', 'median', 'min', 'max'],\n y_label='Population', 
legend='center left')\n\n generate_plot_from_dict('Total Household Population', household,\n parser.path + '/agent_plots/occupants_total.png',\n filter=['total'],\n y_label='Population', legend='center right')\n\n generate_plot_from_dict('Gini Coeffecient for Households over 1000 years', population_dict,\n parser.path + '/agent_plots/resources_gini.png',\n filter=['gini'], legend='center right')\n\n transfer_dict = {}\n transfer_dict['Peer Transfer'] = get_composite_property_as_dict(agent_snapshots, ['peer_chance'],\n [('mean', statistics.mean)], sort=True)['mean']\n transfer_dict['Subordinate Transfer'] = get_composite_property_as_dict(agent_snapshots, ['sub_chance'],\n [('mean', statistics.mean)], sort=True)['mean']\n transfer_dict['iterations'] = np.arange(len(transfer_dict['Peer Transfer']))\n\n generate_plot_from_dict('Average Transfer Percentage of Agents over 2000 iterations', transfer_dict,\n parser.path + '/agent_plots/transfer_chance.png',\n y_label='Probability', legend='center left')\n\n attach_dict = get_composite_property_as_dict(agent_snapshots, ['attachment'],[('mean', statistics.mean)], sort=True)\n\n generate_plot_from_dict('Average Attachment of Agents over 2000 iterations', attach_dict,\n parser.path + '/agent_plots/attachment.png',\n y_label='Attachment', legend='center left')\n\n bins = ['0.1', '0.2', '0.3', '0.4', '0.5', '0.6', '0.7', '0.8', '0.9', '1.0']\n\n precords = get_composite_property_as_dict(agent_snapshots, ['peer_chance'],\n [('bin', bin01)])['bin']\n\n srecords = get_composite_property_as_dict(agent_snapshots, ['sub_chance'],\n [('bin', bin01)])['bin']\n\n records = [[precords[i], srecords[i]] for i in range(len(precords))]\n\n Animate.generateBarAnimat(\"Animation of Distribution of the Agent Resource Transfer Probabilities\", records, bins,\n 50, parser.path + \"/agent_plots/peer_transfer\", 'Transfer Chance', '%',\n colors=['r', 'b'], labels=['Peer Chance', 'Sub Chance'])\n\n percentage_to_farm = get_composite_property_as_dict(agent_snapshots, ['percentage_to_farm'],\n [('mean', statistics.mean)], sort=True)\n\n generate_plot_from_dict('Average Percentage to Farm over 2000 Iterations', percentage_to_farm,\n parser.path + '/agent_plots/percentage_to_farm.png',\n filter=['mean'],\n y_label='%', legend='center right')\n\n\ndef generate_settlement_plots(parser, pixels):\n # Settlement Plots\n if not os.path.isdir(parser.path + '/settlement_plots'):\n os.mkdir(parser.path + '/settlement_plots')\n\n settlement_snapshots = load_json_files(parser.path + '/settlements')\n\n settlement_dict = get_composite_property_as_dict(settlement_snapshots, ['wealth'],\n [('mean', statistics.mean),\n ('median', statistics.median),\n ('min', min),\n ('max', max),\n ('total', sum),\n ('gini', gini)], sort=True)\n\n generate_plot_from_dict('Summary of Settlement Resources over 1000 years', settlement_dict,\n parser.path + '/settlement_plots/resource_summary.png',\n filter=['mean', 'median', 'min', 'max'],\n y_label='Resources (KG)', legend='center left')\n\n generate_plot_from_dict('Total Settlement Resources over 1000 years', settlement_dict,\n parser.path + '/settlement_plots/resource_total.png',\n filter=['total'],\n y_label='Resources (KG)', legend='center right')\n\n generate_plot_from_dict('Gini Coeffecient for Settlements over 1000 years', settlement_dict,\n parser.path + '/settlement_plots/resources_gini.png',\n filter=['gini'], legend='center right')\n\n xtent_arr = xtent_map(settlement_snapshots, pixels)\n\n farm_utility_arr = xtent_to_property(xtent_arr, 
settlement_snapshots, 'farm_utility')\n forage_utility_arr = xtent_to_property(xtent_arr, settlement_snapshots, 'forage_utility')\n\n for z in range(len(farm_utility_arr)):\n for y in range(len(farm_utility_arr[z])):\n for x in range(len(farm_utility_arr[z][y])):\n if farm_utility_arr[z][y][x] == 0.0 and forage_utility_arr[z][y][x] == 0.0:\n farm_utility_arr[z][y][x] = 0.0\n else:\n farm_utility_arr[z][y][x] = 2.0 if farm_utility_arr[z][y][x] > forage_utility_arr[z][y][x] else 1.0\n\n learning_rate_arr = xtent_to_property(xtent_arr, settlement_snapshots, 'learning_rate', 1)\n conformity_arr = xtent_to_property(xtent_arr, settlement_snapshots, 'conformity', 1)\n peer_arr = xtent_to_property(xtent_arr, settlement_snapshots, 'peer_transfer', 1)\n sub_arr = xtent_to_property(xtent_arr, settlement_snapshots, 'sub_transfer', 1)\n\n Animate.generateAnimat('Xtent model showing Settlement Territory', xtent_arr, fps=100, vmin=0, vmax=300,\n filename=parser.path + '/settlement_plots/xtent_animat')\n\n Animate.generateAnimat('Influence of Settlement Farm/Forage Preference', farm_utility_arr, fps=100, vmin=0, vmax=2,\n filename=parser.path + '/settlement_plots/farm_utility_influence_animat')\n\n Animate.generateAnimat('Influence of Settlement Learning Rate', learning_rate_arr, fps=100, vmin=0, vmax=1.2,\n filename=parser.path + '/settlement_plots/learning_rate_influence_animat')\n\n Animate.generateAnimat('Influence of Settlement Conformity', conformity_arr, fps=100, vmin=0, vmax=1.2,\n filename=parser.path + '/settlement_plots/conformity_influence_animat')\n\n Animate.generateAnimat('Influence of Settlement Peer Exchange', peer_arr, fps=100, vmin=0, vmax=2,\n filename=parser.path + '/settlement_plots/peer_influence_animat')\n\n Animate.generateAnimat('Influence of Settlement Sub Exchange', sub_arr, fps=100, vmin=0, vmax=2,\n filename=parser.path + '/settlement_plots/sub_influence_animat')\n\n\ndef generate_environment_plots(parser, pixels):\n\n if not os.path.isdir(parser.path + '/environment_plots'):\n os.mkdir(parser.path + '/environment_plots')\n\n environment_snapshots = load_csvs(parser.path + '/environment')\n\n create_composite_property_as_panda(environment_snapshots, land_possesion, {'pixels': pixels})\n pandas_to_animat('NeoCOOP Visual Representation', parser.width, parser.height, environment_snapshots, 'land_ownership',\n parser.path + '/environment_plots/land_ownership_animat', 100, vmin=0, vmax=2)\n pandas_to_animat('Animation of `Vegetation` over 1000 years', parser.width, parser.height, environment_snapshots,\n 'vegetation',\n parser.path + '/environment_plots/vegetation_animat', 10, vmin=0, vmax=21500)\n #pandas_to_animat('Animation of `Soil Moisture` over 1000 years', parser.width, parser.height, environment_snapshots,\n #'moisture',\n #parser.path + '/environment_plots/moisture_animat', 10, vmin=0, vmax=700)\n\n\ndef generate_log_plots(parser):\n\n if not os.path.isdir(parser.path + '/log_plots'):\n os.mkdir(parser.path + '/log_plots')\n\n log_list = log_file_to_list(parser.path + '/events.log')\n\n generate_plot_from_log_list('Household Farm and Forage actions over 2000 iterations', log_list,\n parser.path + '/log_plots/FarmForage.png', ['HOUSEHOLD.FARM', 'HOUSEHOLD.FORAGE'],\n y_label='Number of actions', default_val=0, legend='center left')\n generate_plot_from_log_list(\"House Resource Transfer Actions over 2000 iterations\", log_list,\n parser.path + '/log_plots/ResourceTransfer.png',\n ['HOUSEHOLD.RESOURCES.TRANSFER.PEER',\n 'HOUSEHOLD.RESOURCES.TRANSFER.AUTH',\n 
'HOUSEHOLD.RESOURCES.TRANSFER.SUB'],\n y_label='Number of actions', default_val=0, legend='center left')\n\n\ndef dynamic_farm_animat(parser, pixels):\n\n environment_snapshots = load_csvs(parser.path + '/environment')\n log_list = log_file_to_list(parser.path + '/events.log')\n\n count = 0\n\n def dynamic_farm(df, **kwargs):\n land = []\n nonlocal count\n\n for i in range(len(df)):\n if df['isSettlement'][i] != -1:\n land.append(2)\n elif 'FARM_LOC' in log_list[count] and i in log_list[count]['FARM_LOC']:\n land.append(3)\n else:\n land.append(kwargs['pixels'][i])\n\n df['dynamic_ownership'] = land\n count += 1\n\n create_composite_property_as_panda(environment_snapshots, dynamic_farm, {'pixels': pixels})\n\n pandas_to_animat('Settlement and Farm Locations on Map', parser.width, parser.height, environment_snapshots,\n 'dynamic_ownership',\n parser.path + '/environment_plots/dynamic_farm_animat', 10, vmin=0, vmax=3)\n\n\ndef other_stuff():\n tradagent_snapshots = load_json_files('trad_sc2/agents')\n utilagent_snapshots = load_json_files('utility_sc2/agents')\n adaptive_snapshots = load_json_files('adaptive_sc2/agents')\n\n plot_dict = {}\n plot_dict['Traditional'] = get_composite_property_as_dict(tradagent_snapshots, 'occupants',\n [('total', sum)], sort=True)['total']\n\n plot_dict['iterations'] = np.arange(len(plot_dict['Traditional']))\n\n plot_dict['Utility'] = get_composite_property_as_dict(utilagent_snapshots, 'occupants',\n [('total', sum)], sort=True)['total']\n\n plot_dict['Adaptive'] = get_composite_property_as_dict(adaptive_snapshots, 'occupants',\n [('total', sum)], sort=True)['total']\n\n generate_plot_from_dict('Total Household Population', plot_dict,\n 'pop_comparison_all.png',\n filter=['Traditional', 'Utility', 'Adaptive'],\n y_label='Population', legend='center right')\n\n\nif __name__ == '__main__':\n import argparse\n from PIL import Image\n\n parser = argparse.ArgumentParser()\n parser.add_argument('path', help='The path to the folder containing all of the generated data', type=str)\n parser.add_argument('width', help='The width of the map.', type=int)\n parser.add_argument('height', help='The height of the map.', type=int)\n parser.add_argument('-v', '--verbose', help='Will print out informative information to the terminal.',\n action='store_true')\n\n parser = parser.parse_args()\n\n pixels = []\n\n for y in range(parser.height):\n for x in range(parser.width):\n pixels.append(0)\n\n #generate_settlement_plots(parser, pixels)\n #generate_log_plots(parser)\n #generate_environment_plots(parser, pixels)\n generate_household_plots(parser)\n\n #other_stuff()\n #dynamic_farm_animat(parser, pixels)\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"numpy.subtract.outer",
"numpy.argmax",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
eduardohenriquearnold/coop-3dod-infra | [
"05c6620b6856efa5d5a060efc4a874ba5b5460c0"
] | [
"lib/models/voxelnet.py"
] | [
"from .model import model\nfrom .torch_util import Conv2d, Conv3d\nfrom .region_proposal_network import RPN\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch\nimport time\n\nimport logging\nlogger = logging.getLogger('global')\n\nclass FCN(nn.Module):\n def __init__(self, inplanes, planes):\n super(FCN, self).__init__()\n planes = int(planes/2)\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=True)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n return out\n\nclass VFE(nn.Module):\n def __init__(self, inplanes, planes):\n super(VFE, self).__init__()\n self.fcn1 = FCN(inplanes, planes)\n\n def forward(self, x):\n batch, channel, voxels, num_T = x.size()\n out = self.fcn1(x)\n point_wise_feature = F.max_pool2d(out, kernel_size=[1, num_T], stride=[1, num_T])\n logger.debug('point_wise_feature size: {}'.format(point_wise_feature.size()))\n out = torch.cat((out, point_wise_feature.repeat(1, 1, 1, num_T)), 1)\n logger.debug('VFE size: {}'.format(out.size()))\n return out\n\nclass Conv_Middle_layers(nn.Module):\n def __init__(self, ):\n super(Conv_Middle_layers, self).__init__()\n self.conv1 = Conv3d(128, 64, stride=(2, 1, 1), padding=(1, 1, 1))\n self.conv2 = Conv3d(64, 64, stride=(1, 1, 1), padding=(0, 1, 1))\n self.conv3 = Conv3d(64, 64, stride=(2, 1, 1), padding=(1, 1, 1))\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2(out)\n out = self.conv3(out)\n shape = out.size()\n logger.debug(\"conv3d feature size: {}\".format(shape))\n out = out.view(shape[0], -1, shape[-2], shape[-1])\n logger.debug(\"after reshape size: {}\".format(out.size()))\n return out\n\nclass feature_learning_network(nn.Module):\n def __init__(self):\n super(feature_learning_network, self).__init__()\n self.vfe1 = VFE(6, 32)\n self.fcn1 = FCN(32, 256)\n\n def forward(self, x):\n batch, channel, voxels, num_T = x.size()\n out = self.vfe1(x)\n out = self.fcn1(out)\n point_wise_feature = F.max_pool2d(out, kernel_size=[1, num_T], stride=[1, num_T])\n return point_wise_feature\n\nclass Voxelnet(model):\n def __init__(self, cfg):\n super(Voxelnet, self).__init__(cfg=cfg)\n self.number_T = cfg['shared']['number_T']\n self.use_random_sampling = cfg['shared']['use_random_sampling']\n self.num_anchors = cfg['shared']['num_anchors']\n self.num_classes = cfg['shared']['num_classes']\n\n self.feature_learnig = feature_learning_network()\n self.conv3d = Conv_Middle_layers()\n self._rpn = RPN(self.num_classes, self.num_anchors)\n # device = torch.device('cuda')\n # self.new_features_tmp = torch.zeros([1, 400, 11, 352, 128], device=device, requires_grad=True)\n\n def RandomSampleing(self):\n pass\n\n def old_feature_extractor(self, voxel_with_points, num_pts, leaf_out, voxel_indices, num_divisions):\n batch, valid_voxels, num_T, channels = voxel_with_points.size()\n voxel_with_points_reshaped = voxel_with_points.permute(0,3,1,2)\n logger.debug(\"voxel_with_points size: {}\".format(voxel_with_points.size()))\n logger.debug(\"reshaped_voxel_with_points size: {}\".format(voxel_with_points_reshaped.size()))\n\n t0 =time.time()\n features = self.feature_learnig(voxel_with_points_reshaped)\n features = features.view(batch, -1, valid_voxels)\n # batch, valid_voxels, channels\n features = features.permute(0,2,1).contiguous()\n features = features.view(batch*valid_voxels, -1)\n logger.debug(\"after feature learning, the features shape: {}\".format(features.size()))\n\n 
t1=time.time()\n z, y, x = num_divisions[0]\n device =torch.device('cuda')\n new_features = torch.zeros([batch, z, y, x, features.size(-1)], device=device, requires_grad=False)\n\n t1_0 =time.time()\n logger.debug(\"new_features is leaf: {}, required_gred:{}\".format(new_features.is_leaf, new_features.requires_grad))\n t1_1 = time.time()\n voxel_indices = voxel_indices.view(-1, voxel_indices.size(-1))\n b_ix = voxel_indices[:, 0]\n indices_z = voxel_indices[:, 1]\n indices_y = voxel_indices[:, 2]\n indices_x = voxel_indices[:, 3]\n # logger.debug(\"new_features[b_ix, indices_z, indices_y, indices_x]'s size: {}\".format(new_features[b_ix, indices_z, indices_y, indices_x].size()))\n t1_2 =time.time()\n new_features[b_ix, indices_z, indices_y, indices_x] = features\n new_features = new_features.permute(0,4,2,1,3)\n logger.debug('new_features size: {}'.format(new_features.size()))\n logger.debug('featues requires_grad: {}'.format(features.requires_grad))\n\n t2=time.time()\n out = self.conv3d(new_features)\n t3=time.time()\n logger.debug(\"USED TIME, feature_learnig:{}, VFE_3D featture:{}, 3D conv:{}\".format(t1-t0, t2-t1,t3-t0))\n logger.debug(\"VFE to 3D feature, create_variable:{} {}, get indices:{}, assignment:{}\".format(t1_0-t1, t1_1-t1_0, t1_2-t1_1, t2-t1_2))\n return out\n\n def feature_extractor(self, voxel_with_points, num_pts, leaf_out, voxel_indices, num_divisions):\n batch, valid_voxels, num_T, channels = voxel_with_points.size()\n voxel_with_points_reshaped = voxel_with_points.permute(0,3,1,2)\n logger.debug(\"voxel_with_points size: {}\".format(voxel_with_points.size()))\n logger.debug(\"reshaped_voxel_with_points size: {}\".format(voxel_with_points_reshaped.size()))\n\n t0 =time.time()\n features = self.feature_learnig(voxel_with_points_reshaped)\n features = features.view(batch, -1, valid_voxels)\n # batch, valid_voxels, channels\n features = features.permute(0,2,1).contiguous()\n # features = features.view(batch*valid_voxels, -1)\n logger.debug(\"after feature learning, the features shape: {}\".format(features.size()))\n\n t1=time.time()\n z, y, x = num_divisions[0]\n device =torch.device('cuda')\n new_features = torch.zeros([batch, z, y, x, features.size(-1)], device=device, requires_grad=False)\n\n t1_0 =time.time()\n logger.debug(\"new_features is leaf: {}, required_gred:{}\".format(new_features.is_leaf, new_features.requires_grad))\n t1_1 = time.time()\n for b_ix in range(batch):\n indices_z = voxel_indices[b_ix, :, 1]\n indices_y = voxel_indices[b_ix, :, 2]\n indices_x = voxel_indices[b_ix, :, 3]\n new_features[b_ix, indices_z, indices_y, indices_x] = features[b_ix]\n # logger.debug(\"new_features[b_ix, indices_z, indices_y, indices_x]'s size: {}\".format(new_features[b_ix, indices_z, indices_y, indices_x].size()))\n t1_2 =time.time()\n # new_features[b_ix, indices_z, indices_y, indices_x] = features\n new_features = new_features.permute(0,4,2,1,3)\n logger.debug('new_features size: {}'.format(new_features.size()))\n logger.debug('featues requires_grad: {}'.format(features.requires_grad))\n\n t2=time.time()\n out = self.conv3d(new_features)\n t3=time.time()\n logger.debug(\"USED TIME, feature_learnig:{}, VFE_3D featture:{}, 3D conv:{}\".format(t1-t0, t2-t1,t3-t0))\n logger.debug(\"VFE to 3D feature, create_variable:{} {}, get indices:{}, assignment:{}\".format(t1_0-t1, t1_1-t1_0, t1_2-t1_1, t2-t1_2))\n return out\n\n def rpn(self, x):\n rpn_pred_cls, rpn_pred_loc = self._rpn(x)\n return rpn_pred_cls, rpn_pred_loc\n"
] | [
[
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.device",
"torch.nn.ReLU",
"torch.nn.functional.max_pool2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sunzhe09/Myproject | [
"a152f0d6199998c52a20ee51ab7d22f5e0e1de7e"
] | [
"is_my_face.py"
] | [
"\r\nimport tensorflow as tf\r\nimport cv2\r\nimport sys\r\nimport os\r\nimport dlib\r\nfrom sklearn.model_selection import train_test_split\r\nfrom train_faces import cnnLayer,size,x,keep_prob_5,keep_prob_75\r\n\r\noutput = cnnLayer() \r\npredict = tf.argmax(output, 1) \r\n\r\nsaver = tf.train.Saver() \r\nsess = tf.Session() \r\npath=tf.train.latest_checkpoint('.')\r\npath=path.replace('.\\\\','./',1)\r\nsaver.restore(sess,path) \r\n\r\ndef is_my_face(image): \r\n res = sess.run(predict, feed_dict={x: [image/255.0], keep_prob_5:1.0, keep_prob_75: 1.0}) \r\n if res[0] == 1: \r\n return True \r\n else: \r\n return False \r\n\r\n#ʹ��dlib�Դ���frontal_face_detector��Ϊ���ǵ�������ȡ��\r\ndetector = dlib.get_frontal_face_detector()\r\ncam = cv2.VideoCapture(0) \r\nfont=cv2.FONT_HERSHEY_COMPLEX\r\n\r\nwhile True: \r\n _, img = cam.read() \r\n gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n dets = detector(gray_image, 1)\r\n \r\n if not len(dets):\r\n print('Can`t get face.')\r\n cv2.imshow('img', img)\r\n key = cv2.waitKey(30) & 0xff \r\n if key == 27:\r\n sys.exit(0)\r\n for i, d in enumerate(dets):\r\n x1 = d.top() if d.top() > 0 else 0\r\n y1 = d.bottom() if d.bottom() > 0 else 0\r\n x2 = d.left() if d.left() > 0 else 0\r\n y2 = d.right() if d.right() > 0 else 0\r\n face = img[x1:y1,x2:y2]\r\n # ����ͼƬ�ijߴ�\r\n face = cv2.resize(face, (size,size))\r\n k=is_my_face(face)\r\n print('Is this my face? %s' % k)\r\n if k==True:\r\n cv2.putText(img,'Me',(x2,x1),cv2.FONT_HERSHEY_COMPLEX, 1.2, (255, 0 ,255), thickness = 4, lineType = 1)\r\n #playsound('./hello.mp3')\r\n \r\n if k==False:\r\n cv2.putText(img,'Stranger',(x2,x1),cv2.FONT_HERSHEY_COMPLEX, 1.2, (0, 0 ,255), thickness = 4, lineType = 1)\r\n\r\n cv2.rectangle(img, (x2,x1),(y2,y1), (255,0,0),3)\r\n cv2.imshow('img',img)\r\n key = cv2.waitKey(30) & 0xff\r\n if key == 27:\r\n sys.exit(0)\r\n\r\nsess.close() \r\n"
] | [
[
"tensorflow.train.Saver",
"tensorflow.argmax",
"tensorflow.train.latest_checkpoint",
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
heypaprika/You_Only_Look_Once | [
"7ba648a2af051b43317aee6956f8f5a441c661f4"
] | [
"utilities/dataloader.py"
] | [
"import sys\nimport os\n\nimport torch\nfrom torch.utils.data import Dataset\nimport numpy as np\n\nfrom PIL import Image\n\nfrom convertYolo.Format import YOLO as cvtYOLO\nfrom convertYolo.Format import VOC as cvtVOC\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport matplotlib.pyplot as plt\n\nsys.path.insert(0, os.path.dirname(__file__))\n\nclass VOC(Dataset):\n IMAGE_FOLDER = \"JPEGImages\"\n LABEL_FOLDER = \"Annotations\"\n IMG_EXTENSIONS = \".jpg\"\n \n def __init__(self, root, train=True, transform=None, target_transform=None, resize=448, class_path='./voc.names'):\n self.root = root\n self.train = train\n self.transform = transform\n self.target_transform = target_transform\n self.resizing_size = resize\n self.class_path = class_path\n \n with open(self.class_path) as f:\n self.classes = f.read().splitlines()\n if not self._check_exists():\n raise RuntimeError(\"Dataset not found.\")\n self.data = self.cvtData()\n \n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n key = list(self.data[index].keys())[0]\n img = Image.open(key).convert('RGB')\n current_shape = img.size\n img = img.resize((self.resizing_size, self.resizing_size))\n target = self.data[index][key]\n \n if self.transform is not None:\n img, aug_target = self.transform([img, target])\n img = torchvision.transforms.ToTensor()(img)\n \n return img, aug_target, current_shape\n\n def _check_exists(self):\n print(\"Image Folder:{}\".format(\n os.path.join(self.root, self.IMAGE_FOLDER)\n ))\n print(\"Label Folder:{}\".format(\n os.path.join(self.root, self.LABEL_FOLDER)\n ))\n \n is_exist = (\n os.path.exists(\n os.path.join(self.root, self.IMAGE_FOLDER)\n )\n ) and (\n os.path.exists(\n os.path.join(self.root, self.LABEL_FOLDER)\n )\n )\n \n return is_exist\n \n def cvtData(self):\n result = []\n voc = cvtVOC()\n flag, self.dict_data = voc.parse(os.path.join(self.root, self.LABEL_FOLDER))\n yolo = cvtYOLO(os.path.abspath(self.class_path))\n \n try:\n if flag:\n flag, data = yolo.generate(self.dict_data)\n keys = list(data.keys())\n keys = sorted(keys, key=lambda key: int(key.split(\"_\")[-1]))\n \n for key in keys:\n contents = list(filter(None, data[key].split(\"\\n\")))\n target = []\n for i in range(len(contents)):\n tmp = contents[i].split(\" \")\n for j in range(len(tmp)):\n tmp[j] = float(tmp[j])\n target.append(tmp)\n \n result.append({\n os.path.join(\n self.root, \n self.IMAGE_FOLDER, \n \"\".join([key, self.IMG_EXTENSIONS])\n ) : target\n })\n \n return result\n except Exception as e:\n raise RuntimeError(\"Error : {}\".format(e))\n\ndef detection_collate(batch):\n targets = []\n imgs = []\n sizes = []\n\n for sample in batch:\n imgs.append(sample[0])\n sizes.append(sample[2])\n\n np_label = np.zeros((7,7,6), dtype=np.float32)\n\n for object in sample[1]:\n objectness = 1\n classes = object[0]\n x_ratio = object[1]\n y_ratio = object[2]\n w_ratio = object[3]\n h_ratio = object[4]\n\n scale_factor = 1 / 7\n grid_x_index = int(x_ratio // scale_factor)\n grid_y_index = int(y_ratio // scale_factor)\n x_offset = x_ratio / scale_factor - grid_x_index\n y_offset = y_ratio / scale_factor - grid_y_index\n\n np_label[grid_x_index][grid_y_index] = np.array([objectness, x_offset, y_offset, w_ratio, h_ratio, classes])\n\n label = torch.from_numpy(np_label)\n targets.append(label)\n\n return torch.stack(imgs, 0), torch.stack(targets, 0), sizes"
] | [
[
"torch.stack",
"numpy.array",
"numpy.zeros",
"torch.from_numpy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
i008/zeroml | [
"849258d34606698aba44c5f0028254025755e8ff"
] | [
"tests/conftest.py"
] | [
"import uuid\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport PIL.Image\nimport pytest\n\n\[email protected]\ndef random_pil_image() -> PIL.Image:\n image = PIL.Image.fromarray(np.random.randn(224, 224, 3).astype(\"uint8\"))\n return image\n\n\[email protected]\ndef create_image_test_dataframe(tmp_path) -> (pd.DataFrame, Path):\n\n images = [PIL.Image.fromarray(np.ones((224, 224, 3)).astype(\"uint8\")) for _ in range(10)]\n labels = [i for i in range(10)]\n fns = [str(uuid.uuid4()) + '.png' for _ in range(10)]\n print(fns)\n\n for image, f in zip(images, fns):\n image.save(tmp_path / f)\n\n df = pd.DataFrame({'labels': labels, 'file_names': fns})\n base_path = tmp_path\n return df, base_path\n"
] | [
[
"numpy.random.randn",
"pandas.DataFrame",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
guillaumekln/estimator | [
"9c750bc38582d4c2f18c533bf2f8aa36c51b2f75"
] | [
"tensorflow_estimator/python/estimator/canned/dnn.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Deep Neural Network estimators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom tensorflow.python.feature_column import feature_column\nfrom tensorflow.python.feature_column import feature_column_lib\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras.engine import training\nfrom tensorflow.python.layers import core as core_layers\nfrom tensorflow.python.layers import normalization\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops.losses import losses\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.util.tf_export import estimator_export\nfrom tensorflow_estimator.python.estimator import estimator\nfrom tensorflow_estimator.python.estimator import model_fn\nfrom tensorflow_estimator.python.estimator.canned import head as head_lib\nfrom tensorflow_estimator.python.estimator.canned import optimizers\nfrom tensorflow_estimator.python.estimator.head import head_utils\nfrom tensorflow_estimator.python.estimator.head import regression_head\n\n# The default learning rate of 0.05 is a historical artifact of the initial\n# implementation, but seems a reasonable choice.\n_LEARNING_RATE = 0.05\n\n\ndef _add_hidden_layer_summary(value, tag):\n summary.scalar('%s/fraction_of_zero_values' % tag, nn.zero_fraction(value))\n summary.histogram('%s/activation' % tag, value)\n\n\n@estimator_export('estimator.experimental.dnn_logit_fn_builder')\ndef dnn_logit_fn_builder(units, hidden_units, feature_columns, activation_fn,\n dropout, input_layer_partitioner, batch_norm):\n \"\"\"Function builder for a dnn logit_fn.\n\n Args:\n units: An int indicating the dimension of the logit layer. In the\n MultiHead case, this should be the sum of all component Heads' logit\n dimensions.\n hidden_units: Iterable of integer number of hidden units per layer.\n feature_columns: Iterable of `feature_column._FeatureColumn` model inputs.\n activation_fn: Activation function applied to each layer.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n input_layer_partitioner: Partitioner for input layer.\n batch_norm: Whether to use batch normalization after each hidden layer.\n\n Returns:\n A logit_fn (see below).\n\n Raises:\n ValueError: If units is not an int.\n \"\"\"\n if not isinstance(units, int):\n raise ValueError('units must be an int. Given type: {}'.format(\n type(units)))\n\n def dnn_logit_fn(features, mode):\n \"\"\"Deep Neural Network logit_fn.\n\n Args:\n features: This is the first item returned from the `input_fn`\n passed to `train`, `evaluate`, and `predict`. 
This should be a\n single `Tensor` or `dict` of same.\n mode: Optional. Specifies if this training, evaluation or prediction. See\n `ModeKeys`.\n\n Returns:\n A `Tensor` representing the logits, or a list of `Tensor`'s representing\n multiple logits in the MultiHead case.\n \"\"\"\n dnn_model = _DNNModel(\n units,\n hidden_units,\n feature_columns,\n activation_fn,\n dropout,\n input_layer_partitioner,\n batch_norm,\n name='dnn')\n return dnn_model(features, mode)\n\n return dnn_logit_fn\n\n\ndef _get_previous_name_scope():\n current_name_scope = ops.get_name_scope()\n return current_name_scope.rsplit('/', 1)[0] + '/'\n\n\nclass _DNNModel(training.Model):\n \"\"\"A DNN Model.\"\"\"\n\n def __init__(self,\n units,\n hidden_units,\n feature_columns,\n activation_fn,\n dropout,\n input_layer_partitioner,\n batch_norm,\n name=None,\n **kwargs):\n super(_DNNModel, self).__init__(name=name, **kwargs)\n if feature_column_lib.is_feature_column_v2(feature_columns):\n self._input_layer = feature_column_lib.DenseFeatures(\n feature_columns=feature_columns, name='input_layer')\n else:\n self._input_layer = feature_column.InputLayer(\n feature_columns=feature_columns,\n name='input_layer',\n create_scope_now=False)\n\n self._add_layer(self._input_layer, 'input_layer')\n\n self._dropout = dropout\n self._batch_norm = batch_norm\n\n self._hidden_layers = []\n self._dropout_layers = []\n self._batch_norm_layers = []\n self._hidden_layer_scope_names = []\n for layer_id, num_hidden_units in enumerate(hidden_units):\n with variable_scope.variable_scope(\n 'hiddenlayer_%d' % layer_id) as hidden_layer_scope:\n hidden_layer = core_layers.Dense(\n units=num_hidden_units,\n activation=activation_fn,\n kernel_initializer=init_ops.glorot_uniform_initializer(),\n name=hidden_layer_scope,\n _scope=hidden_layer_scope)\n self._add_layer(hidden_layer, hidden_layer_scope.name)\n self._hidden_layer_scope_names.append(hidden_layer_scope.name)\n self._hidden_layers.append(hidden_layer)\n if self._dropout is not None:\n dropout_layer = core_layers.Dropout(rate=self._dropout)\n self._add_layer(dropout_layer, dropout_layer.name)\n self._dropout_layers.append(dropout_layer)\n if self._batch_norm:\n batch_norm_layer = normalization.BatchNormalization(\n # The default momentum 0.99 actually crashes on certain\n # problem, so here we use 0.999, which is the default of\n # tf.contrib.layers.batch_norm.\n momentum=0.999,\n trainable=True,\n name='batchnorm_%d' % layer_id,\n _scope='batchnorm_%d' % layer_id)\n self._add_layer(batch_norm_layer, batch_norm_layer.name)\n self._batch_norm_layers.append(batch_norm_layer)\n\n with variable_scope.variable_scope('logits') as logits_scope:\n self._logits_layer = core_layers.Dense(\n units=units,\n activation=None,\n kernel_initializer=init_ops.glorot_uniform_initializer(),\n name=logits_scope,\n _scope=logits_scope)\n self._add_layer(self._logits_layer, logits_scope.name)\n self._logits_scope_name = logits_scope.name\n self._input_layer_partitioner = input_layer_partitioner\n\n def call(self, features, mode):\n is_training = mode == model_fn.ModeKeys.TRAIN\n # The Keras training.Model adds a name_scope with the name of the model\n # which modifies the constructed graph. 
Hence we add another name_scope\n # here which is the one before the training.Model one was applied.\n # TODO(rohanj): Remove this in TF 2.0 (b/116728605)\n with ops.name_scope(name=_get_previous_name_scope()):\n # TODO(rohanj): Remove dependence on variable scope for partitioning.\n with variable_scope.variable_scope(\n 'input_from_feature_columns',\n partitioner=self._input_layer_partitioner):\n net = self._input_layer(features)\n for i in range(len(self._hidden_layers)):\n net = self._hidden_layers[i](net)\n if self._dropout is not None and is_training:\n net = self._dropout_layers[i](net, training=True)\n if self._batch_norm:\n net = self._batch_norm_layers[i](net, training=is_training)\n _add_hidden_layer_summary(net, self._hidden_layer_scope_names[i])\n\n logits = self._logits_layer(net)\n _add_hidden_layer_summary(logits, self._logits_scope_name)\n return logits\n\n def _add_layer(self, layer, layer_name):\n # \"Magic\" required for keras.Model classes to track all the variables in\n # a list of layers.Layer objects.\n # TODO(ashankar): Figure out API so user code doesn't have to do this.\n setattr(self, layer_name, layer)\n\n\ndef _dnn_model_fn_core(features,\n labels,\n mode,\n head,\n hidden_units,\n feature_columns,\n optimizer,\n activation_fn=nn.relu,\n dropout=None,\n input_layer_partitioner=None,\n config=None,\n use_tpu=False,\n batch_norm=False):\n\n if not isinstance(features, dict):\n raise ValueError('features should be a dictionary of `Tensor`s. '\n 'Given type: {}'.format(type(features)))\n\n num_ps_replicas = config.num_ps_replicas if config else 0\n\n partitioner = (None if use_tpu else\n partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas))\n with variable_scope.variable_scope(\n 'dnn', values=tuple(six.itervalues(features)), partitioner=partitioner):\n input_layer_partitioner = input_layer_partitioner or (\n None if use_tpu else partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas, min_slice_size=64 << 20))\n\n logit_fn = dnn_logit_fn_builder(\n units=head.logits_dimension,\n hidden_units=hidden_units,\n feature_columns=feature_columns,\n activation_fn=activation_fn,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n batch_norm=batch_norm)\n logits = logit_fn(features=features, mode=mode)\n\n if use_tpu:\n return head._create_tpu_estimator_spec( # pylint: disable=protected-access\n features=features,\n mode=mode,\n labels=labels,\n optimizer=optimizer,\n logits=logits)\n else:\n return head.create_estimator_spec(\n features=features,\n mode=mode,\n labels=labels,\n optimizer=optimizer,\n logits=logits)\n\n\ndef _dnn_model_fn(features,\n labels,\n mode,\n head,\n hidden_units,\n feature_columns,\n optimizer='Adagrad',\n activation_fn=nn.relu,\n dropout=None,\n input_layer_partitioner=None,\n config=None,\n use_tpu=False,\n batch_norm=False):\n \"\"\"Deep Neural Net model_fn v1.\n\n Args:\n features: dict of `Tensor`.\n labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype\n `int32` or `int64` in the range `[0, n_classes)`.\n mode: Defines whether this is training, evaluation or prediction. See\n `ModeKeys`.\n head: A `head_lib._Head` instance.\n hidden_units: Iterable of integer number of hidden units per layer.\n feature_columns: Iterable of `feature_column._FeatureColumn` model inputs.\n optimizer: String, `tf.Optimizer` object, or callable that creates the\n optimizer to use for training. 
If not specified, will use the Adagrad\n optimizer with a default learning rate of 0.05.\n activation_fn: Activation function applied to each layer.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n input_layer_partitioner: Partitioner for input layer. Defaults to\n `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: `RunConfig` object to configure the runtime settings.\n use_tpu: Whether to make a DNN model able to run on TPU. Will make function\n return a `_TPUEstimatorSpec` instance and disable variable partitioning.\n batch_norm: Whether to use batch normalization after each hidden layer.\n\n Returns:\n An `EstimatorSpec` instance.\n\n Raises:\n ValueError: If features has the wrong type.\n \"\"\"\n\n optimizer = optimizers.get_optimizer_instance(\n optimizer, learning_rate=_LEARNING_RATE)\n\n return _dnn_model_fn_core(\n features,\n labels,\n mode,\n head,\n hidden_units,\n feature_columns,\n optimizer=optimizer,\n activation_fn=activation_fn,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n use_tpu=use_tpu,\n batch_norm=batch_norm)\n\n\ndef _dnn_model_fn_v2(features,\n labels,\n mode,\n head,\n hidden_units,\n feature_columns,\n optimizer='Adagrad',\n activation_fn=nn.relu,\n dropout=None,\n input_layer_partitioner=None,\n config=None,\n use_tpu=False,\n batch_norm=False):\n \"\"\"Deep Neural Net model_fn v2.\n\n This function is different than _dnn_model_fn_v1 in the way it handles the\n optimizer when a String optimizer name is passed.\n\n Args:\n features: dict of `Tensor`.\n labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype\n `int32` or `int64` in the range `[0, n_classes)`.\n mode: Defines whether this is training, evaluation or prediction. See\n `ModeKeys`.\n head: A `head_lib._Head` instance.\n hidden_units: Iterable of integer number of hidden units per layer.\n feature_columns: Iterable of `feature_column._FeatureColumn` model inputs.\n optimizer: String, `tf.Optimizer` object, or callable that creates the\n optimizer to use for training. If not specified, will use the Adagrad\n optimizer. If it is String, the default learning rate of the optimizer\n will be used. If it is String, and optimizer does not have a default\n learning rate, then, a fixed learning rate of 0.05 is used.\n activation_fn: Activation function applied to each layer.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n input_layer_partitioner: Partitioner for input layer. Defaults to\n `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: `RunConfig` object to configure the runtime settings.\n use_tpu: Whether to make a DNN model able to run on TPU. 
Will make function\n return a `_TPUEstimatorSpec` instance and disable variable partitioning.\n batch_norm: Whether to use batch normalization after each hidden layer.\n\n Returns:\n An `EstimatorSpec` instance.\n\n Raises:\n ValueError: If features has the wrong type.\n \"\"\"\n optimizer = optimizers.get_optimizer_instance_v2(optimizer)\n\n return _dnn_model_fn_core(\n features,\n labels,\n mode,\n head,\n hidden_units,\n feature_columns,\n optimizer=optimizer,\n activation_fn=activation_fn,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n use_tpu=use_tpu,\n batch_norm=batch_norm)\n\n\n@estimator_export('estimator.DNNClassifier', v1=[])\nclass DNNClassifierV2(estimator.EstimatorV2):\n \"\"\"A classifier for TensorFlow DNN models.\n\n Example:\n\n ```python\n categorical_feature_a = categorical_column_with_hash_bucket(...)\n categorical_feature_b = categorical_column_with_hash_bucket(...)\n\n categorical_feature_a_emb = embedding_column(\n categorical_column=categorical_feature_a, ...)\n categorical_feature_b_emb = embedding_column(\n categorical_column=categorical_feature_b, ...)\n\n estimator = DNNClassifier(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256])\n\n # Or estimator using the ProximalAdagradOptimizer optimizer with\n # regularization.\n estimator = DNNClassifier(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256],\n optimizer=tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001\n ))\n\n # Or estimator using an optimizer with a learning rate decay.\n estimator = DNNClassifier(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256],\n optimizer=lambda: tf.AdamOptimizer(\n learning_rate=tf.exponential_decay(\n learning_rate=0.1,\n global_step=tf.get_global_step(),\n decay_steps=10000,\n decay_rate=0.96))\n\n # Or estimator with warm-starting from a previous checkpoint.\n estimator = DNNClassifier(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256],\n warm_start_from=\"/path/to/checkpoint/dir\")\n\n # Input builders\n def input_fn_train:\n # Returns tf.data.Dataset of (x, y) tuple where y represents label's class\n # index.\n pass\n def input_fn_eval:\n # Returns tf.data.Dataset of (x, y) tuple where y represents label's class\n # index.\n pass\n def input_fn_predict:\n # Returns tf.data.Dataset of (x, None) tuple.\n pass\n estimator.train(input_fn=input_fn_train)\n metrics = estimator.evaluate(input_fn=input_fn_eval)\n predictions = estimator.predict(input_fn=input_fn_predict)\n ```\n\n Input of `train` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * if `weight_column` is not `None`, a feature with `key=weight_column` whose\n value is a `Tensor`.\n * for each `column` in `feature_columns`:\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\n with `key` the id column name, the second with `key` the weight column\n name. Both features' `value` must be a `SparseTensor`.\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n\n Loss is calculated by using softmax cross entropy.\n\n @compatibility(eager)\n Estimators can be used while eager execution is enabled. 
Note that `input_fn`\n and all hooks are executed inside a graph context, so they have to be written\n to be compatible with graph mode. Note that `input_fn` code using `tf.data`\n generally works in both graph and eager modes.\n @end_compatibility\n \"\"\"\n\n def __init__(\n self,\n hidden_units,\n feature_columns,\n model_dir=None,\n n_classes=2,\n weight_column=None,\n label_vocabulary=None,\n optimizer='Adagrad',\n activation_fn=nn.relu,\n dropout=None,\n input_layer_partitioner=None,\n config=None,\n warm_start_from=None,\n loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,\n batch_norm=False,\n ):\n \"\"\"Initializes a `DNNClassifier` instance.\n\n Args:\n hidden_units: Iterable of number hidden units per layer. All layers are\n fully connected. Ex. `[64, 32]` means first layer has 64 nodes and\n second one has 32.\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `_FeatureColumn`.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n n_classes: Number of label classes. Defaults to 2, namely binary\n classification. Must be > 1.\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example. If it is a string, it is\n used as a key to fetch weight tensor from the `features`. If it is a\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\n then weight_column.normalizer_fn is applied on it to get weight tensor.\n label_vocabulary: A list of strings represents possible label values. If\n given, labels must be string type and have any value in\n `label_vocabulary`. If it is not given, that means labels are\n already encoded as integer or float within [0, 1] for `n_classes=2` and\n encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .\n Also there will be errors if vocabulary is not provided and labels are\n string.\n optimizer: An instance of `tf.Optimizer` used to train the model. Can also\n be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or\n callable. Defaults to Adagrad optimizer.\n activation_fn: Activation function applied to each layer. If `None`, will\n use `tf.nn.relu`.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n input_layer_partitioner: Optional. Partitioner for input layer. Defaults\n to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: `RunConfig` object to configure the runtime settings.\n warm_start_from: A string filepath to a checkpoint to warm-start from, or\n a `WarmStartSettings` object to fully configure warm-starting. If the\n string filepath is provided instead of a `WarmStartSettings`, then all\n weights are warm-started, and it is assumed that vocabularies and Tensor\n names are unchanged.\n loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how\n to reduce training loss over batch. 
Defaults to `SUM_OVER_BATCH_SIZE`.\n batch_norm: Whether to use batch normalization after each hidden layer.\n \"\"\"\n head = head_utils.binary_or_multi_class_head(\n n_classes, weight_column=weight_column,\n label_vocabulary=label_vocabulary,\n loss_reduction=loss_reduction)\n\n def _model_fn(features, labels, mode, config):\n \"\"\"Call the defined shared _dnn_model_fn_v2.\"\"\"\n return _dnn_model_fn_v2(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n hidden_units=hidden_units,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n activation_fn=activation_fn,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n batch_norm=batch_norm)\n\n super(DNNClassifierV2, self).__init__(\n model_fn=_model_fn,\n model_dir=model_dir,\n config=config,\n warm_start_from=warm_start_from)\n\n\n@estimator_export(v1=['estimator.DNNClassifier']) # pylint: disable=missing-docstring\nclass DNNClassifier(estimator.Estimator):\n __doc__ = DNNClassifierV2.__doc__.replace('SUM_OVER_BATCH_SIZE', 'SUM')\n\n def __init__(\n self,\n hidden_units,\n feature_columns,\n model_dir=None,\n n_classes=2,\n weight_column=None,\n label_vocabulary=None,\n optimizer='Adagrad',\n activation_fn=nn.relu,\n dropout=None,\n input_layer_partitioner=None,\n config=None,\n warm_start_from=None,\n loss_reduction=losses.Reduction.SUM,\n batch_norm=False,\n ):\n head = head_lib._binary_logistic_or_multi_class_head( # pylint: disable=protected-access\n n_classes, weight_column, label_vocabulary, loss_reduction)\n\n def _model_fn(features, labels, mode, config):\n \"\"\"Call the defined shared _dnn_model_fn_v2.\"\"\"\n return _dnn_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n hidden_units=hidden_units,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n activation_fn=activation_fn,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n batch_norm=batch_norm)\n\n super(DNNClassifier, self).__init__(\n model_fn=_model_fn,\n model_dir=model_dir,\n config=config,\n warm_start_from=warm_start_from)\n\n\n# TODO(b/117517419): Update these contrib references once head moves to core.\n# Also references to the \"_Head\" class need to be replaced with \"Head\".\n@estimator_export('estimator.DNNEstimator', v1=[])\nclass DNNEstimatorV2(estimator.EstimatorV2):\n \"\"\"An estimator for TensorFlow DNN models with user-specified head.\n\n Example:\n\n ```python\n sparse_feature_a = sparse_column_with_hash_bucket(...)\n sparse_feature_b = sparse_column_with_hash_bucket(...)\n\n sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,\n ...)\n sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,\n ...)\n\n estimator = DNNEstimator(\n head=tf.contrib.estimator.multi_label_head(n_classes=3),\n feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],\n hidden_units=[1024, 512, 256])\n\n # Or estimator using the ProximalAdagradOptimizer optimizer with\n # regularization.\n estimator = DNNEstimator(\n head=tf.contrib.estimator.multi_label_head(n_classes=3),\n feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],\n hidden_units=[1024, 512, 256],\n optimizer=tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001\n ))\n\n # Or estimator using an optimizer with a learning rate decay.\n estimator = DNNEstimator(\n head=tf.contrib.estimator.multi_label_head(n_classes=3),\n feature_columns=[sparse_feature_a_emb, 
sparse_feature_b_emb],\n hidden_units=[1024, 512, 256],\n optimizer=lambda: tf.AdamOptimizer(\n learning_rate=tf.exponential_decay(\n learning_rate=0.1,\n global_step=tf.get_global_step(),\n decay_steps=10000,\n decay_rate=0.96))\n\n # Or estimator with warm-starting from a previous checkpoint.\n estimator = DNNEstimator(\n head=tf.contrib.estimator.multi_label_head(n_classes=3),\n feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],\n hidden_units=[1024, 512, 256],\n warm_start_from=\"/path/to/checkpoint/dir\")\n\n # Input builders\n def input_fn_train:\n # Returns tf.data.Dataset of (x, y) tuple where y represents label's class\n # index.\n pass\n def input_fn_eval:\n # Returns tf.data.Dataset of (x, y) tuple where y represents label's class\n # index.\n pass\n def input_fn_predict:\n # Returns tf.data.Dataset of (x, None) tuple.\n pass\n estimator.train(input_fn=input_fn_train)\n metrics = estimator.evaluate(input_fn=input_fn_eval)\n predictions = estimator.predict(input_fn=input_fn_predict)\n ```\n\n Input of `train` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * if `weight_column` is not `None`, a feature with `key=weight_column` whose\n value is a `Tensor`.\n * for each `column` in `feature_columns`:\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\n with `key` the id column name, the second with `key` the weight column\n name. Both features' `value` must be a `SparseTensor`.\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n\n Loss and predicted output are determined by the specified head.\n\n @compatibility(eager)\n Estimators can be used while eager execution is enabled. Note that `input_fn`\n and all hooks are executed inside a graph context, so they have to be written\n to be compatible with graph mode. Note that `input_fn` code using `tf.data`\n generally works in both graph and eager modes.\n @end_compatibility\n \"\"\"\n\n def __init__(self,\n head,\n hidden_units,\n feature_columns,\n model_dir=None,\n optimizer='Adagrad',\n activation_fn=nn.relu,\n dropout=None,\n input_layer_partitioner=None,\n config=None,\n warm_start_from=None,\n batch_norm=False):\n \"\"\"Initializes a `DNNEstimator` instance.\n\n Args:\n head: A `_Head` instance constructed with a method such as\n `tf.contrib.estimator.multi_label_head`.\n hidden_units: Iterable of number hidden units per layer. All layers are\n fully connected. Ex. `[64, 32]` means first layer has 64 nodes and\n second one has 32.\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `_FeatureColumn`.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n optimizer: An instance of `tf.Optimizer` used to train the model. Can also\n be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or\n callable. Defaults to Adagrad optimizer.\n activation_fn: Activation function applied to each layer. If `None`, will\n use `tf.nn.relu`.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n input_layer_partitioner: Optional. Partitioner for input layer. 
Defaults\n to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: `RunConfig` object to configure the runtime settings.\n warm_start_from: A string filepath to a checkpoint to warm-start from, or\n a `WarmStartSettings` object to fully configure warm-starting. If the\n string filepath is provided instead of a `WarmStartSettings`, then all\n weights are warm-started, and it is assumed that vocabularies and Tensor\n names are unchanged.\n batch_norm: Whether to use batch normalization after each hidden layer.\n \"\"\"\n def _model_fn(features, labels, mode, config):\n \"\"\"Call the defined shared _dnn_model_fn_v2.\"\"\"\n return _dnn_model_fn_v2(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n hidden_units=hidden_units,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n activation_fn=activation_fn,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n batch_norm=batch_norm)\n super(DNNEstimatorV2, self).__init__(\n model_fn=_model_fn, model_dir=model_dir, config=config,\n warm_start_from=warm_start_from)\n\n\n@estimator_export(v1=['estimator.DNNEstimator']) # pylint: disable=missing-docstring\nclass DNNEstimator(estimator.Estimator):\n __doc__ = DNNEstimatorV2.__doc__\n\n def __init__(self,\n head,\n hidden_units,\n feature_columns,\n model_dir=None,\n optimizer='Adagrad',\n activation_fn=nn.relu,\n dropout=None,\n input_layer_partitioner=None,\n config=None,\n warm_start_from=None,\n batch_norm=False):\n def _model_fn(features, labels, mode, config):\n \"\"\"Call the defined shared _dnn_model_fn.\"\"\"\n return _dnn_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n hidden_units=hidden_units,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n activation_fn=activation_fn,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n batch_norm=batch_norm)\n super(DNNEstimator, self).__init__(\n model_fn=_model_fn, model_dir=model_dir, config=config,\n warm_start_from=warm_start_from)\n\n\n@estimator_export('estimator.DNNRegressor', v1=[])\nclass DNNRegressorV2(estimator.EstimatorV2):\n \"\"\"A regressor for TensorFlow DNN models.\n\n Example:\n\n ```python\n categorical_feature_a = categorical_column_with_hash_bucket(...)\n categorical_feature_b = categorical_column_with_hash_bucket(...)\n\n categorical_feature_a_emb = embedding_column(\n categorical_column=categorical_feature_a, ...)\n categorical_feature_b_emb = embedding_column(\n categorical_column=categorical_feature_b, ...)\n\n estimator = DNNRegressor(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256])\n\n # Or estimator using the ProximalAdagradOptimizer optimizer with\n # regularization.\n estimator = DNNRegressor(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256],\n optimizer=tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001\n ))\n\n # Or estimator using an optimizer with a learning rate decay.\n estimator = DNNRegressor(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256],\n optimizer=lambda: tf.AdamOptimizer(\n learning_rate=tf.exponential_decay(\n learning_rate=0.1,\n global_step=tf.get_global_step(),\n decay_steps=10000,\n decay_rate=0.96))\n\n # Or estimator with warm-starting from a previous checkpoint.\n estimator = DNNRegressor(\n 
feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256],\n warm_start_from=\"/path/to/checkpoint/dir\")\n\n # Input builders\n def input_fn_train:\n # Returns tf.data.Dataset of (x, y) tuple where y represents label's class\n # index.\n pass\n def input_fn_eval:\n # Returns tf.data.Dataset of (x, y) tuple where y represents label's class\n # index.\n pass\n def input_fn_predict:\n # Returns tf.data.Dataset of (x, None) tuple.\n pass\n estimator.train(input_fn=input_fn_train)\n metrics = estimator.evaluate(input_fn=input_fn_eval)\n predictions = estimator.predict(input_fn=input_fn_predict)\n ```\n\n Input of `train` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * if `weight_column` is not `None`, a feature with `key=weight_column` whose\n value is a `Tensor`.\n * for each `column` in `feature_columns`:\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\n with `key` the id column name, the second with `key` the weight column\n name. Both features' `value` must be a `SparseTensor`.\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n\n Loss is calculated by using mean squared error.\n\n @compatibility(eager)\n Estimators can be used while eager execution is enabled. Note that `input_fn`\n and all hooks are executed inside a graph context, so they have to be written\n to be compatible with graph mode. Note that `input_fn` code using `tf.data`\n generally works in both graph and eager modes.\n @end_compatibility\n \"\"\"\n\n def __init__(\n self,\n hidden_units,\n feature_columns,\n model_dir=None,\n label_dimension=1,\n weight_column=None,\n optimizer='Adagrad',\n activation_fn=nn.relu,\n dropout=None,\n input_layer_partitioner=None,\n config=None,\n warm_start_from=None,\n loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,\n batch_norm=False,\n ):\n \"\"\"Initializes a `DNNRegressor` instance.\n\n Args:\n hidden_units: Iterable of number hidden units per layer. All layers are\n fully connected. Ex. `[64, 32]` means first layer has 64 nodes and\n second one has 32.\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `_FeatureColumn`.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n label_dimension: Number of regression targets per example. This is the\n size of the last dimension of the labels and logits `Tensor` objects\n (typically, these have shape `[batch_size, label_dimension]`).\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example. If it is a string, it is\n used as a key to fetch weight tensor from the `features`. If it is a\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\n then weight_column.normalizer_fn is applied on it to get weight tensor.\n optimizer: An instance of `tf.Optimizer` used to train the model. Can also\n be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or\n callable. 
Defaults to Adagrad optimizer.\n activation_fn: Activation function applied to each layer. If `None`, will\n use `tf.nn.relu`.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n input_layer_partitioner: Optional. Partitioner for input layer. Defaults\n to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: `RunConfig` object to configure the runtime settings.\n warm_start_from: A string filepath to a checkpoint to warm-start from, or\n a `WarmStartSettings` object to fully configure warm-starting. If the\n string filepath is provided instead of a `WarmStartSettings`, then all\n weights are warm-started, and it is assumed that vocabularies and Tensor\n names are unchanged.\n loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how\n to reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`.\n batch_norm: Whether to use batch normalization after each hidden layer.\n \"\"\"\n head = regression_head.RegressionHead(\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=loss_reduction)\n def _model_fn(features, labels, mode, config):\n \"\"\"Call the defined shared _dnn_model_fn.\"\"\"\n return _dnn_model_fn_v2(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n hidden_units=hidden_units,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n activation_fn=activation_fn,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n batch_norm=batch_norm)\n\n super(DNNRegressorV2, self).__init__(\n model_fn=_model_fn,\n model_dir=model_dir,\n config=config,\n warm_start_from=warm_start_from)\n\n\n@estimator_export(v1=['estimator.DNNRegressor']) # pylint: disable=missing-docstring\nclass DNNRegressor(estimator.Estimator):\n __doc__ = DNNRegressorV2.__doc__.replace('SUM_OVER_BATCH_SIZE', 'SUM')\n\n def __init__(\n self,\n hidden_units,\n feature_columns,\n model_dir=None,\n label_dimension=1,\n weight_column=None,\n optimizer='Adagrad',\n activation_fn=nn.relu,\n dropout=None,\n input_layer_partitioner=None,\n config=None,\n warm_start_from=None,\n loss_reduction=losses.Reduction.SUM,\n batch_norm=False,\n ):\n head = head_lib._regression_head( # pylint: disable=protected-access\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=loss_reduction)\n\n def _model_fn(features, labels, mode, config):\n \"\"\"Call the defined shared _dnn_model_fn.\"\"\"\n return _dnn_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n hidden_units=hidden_units,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n activation_fn=activation_fn,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n batch_norm=batch_norm)\n\n super(DNNRegressor, self).__init__(\n model_fn=_model_fn,\n model_dir=model_dir,\n config=config,\n warm_start_from=warm_start_from)\n"
] | [
[
"tensorflow.python.util.tf_export.estimator_export",
"tensorflow.python.layers.normalization.BatchNormalization",
"tensorflow.python.feature_column.feature_column_lib.DenseFeatures",
"tensorflow.python.ops.init_ops.glorot_uniform_initializer",
"tensorflow.python.summary.summary.histogram",
"tensorflow.python.feature_column.feature_column.InputLayer",
"tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.nn.zero_fraction",
"tensorflow.python.layers.core.Dropout",
"tensorflow.python.framework.ops.get_name_scope",
"tensorflow.python.feature_column.feature_column_lib.is_feature_column_v2"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"2.2"
]
}
] |
sjforeman/cora | [
"48d127d9e00b1fb1cf2024004d1d1e7441fd1e1f"
] | [
"cora/util/nputil.py"
] | [
"\"\"\"Utility functions to help with pure numpy stuff.\"\"\"\n# === Start Python 2/3 compatibility\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom future.builtins import * # noqa pylint: disable=W0401, W0614\nfrom future.builtins.disabled import * # noqa pylint: disable=W0401, W0614\n\n# === End Python 2/3 compatibility\n\nfrom future.utils import native_str\n\nimport numpy as np\nimport scipy.linalg as la\n\n\ndef save_ndarray_list(fname, la):\n \"\"\"Save a list of numpy arrays to disk.\n\n This is designed so it can be reloaded exactly (with the exact\n same ordering) by `load_ndarray_list`.\n\n Parameters\n ----------\n fname : string\n filename to save to.\n la : list of np.ndarrays\n list of arrays to save.\n \"\"\"\n d1 = {repr(i): v for i, v in enumerate(la)}\n\n np.savez(native_str(fname), **d1)\n\n\ndef load_ndarray_list(fname):\n \"\"\"Load a list of arrays saved by `save_ndarray_list`.\n\n Parameters\n ----------\n fname : string\n filename to load.\n\n Returns\n -------\n la : list of np.ndarrays\n The list of loaded numpy arrays. This should be identical tp\n what was saved by `save_ndarray_list`.\n \"\"\"\n\n d1 = np.load(native_str(fname))\n la = [v for i, v in sorted(iter(d1.items()), key=lambda kv: int(kv[0]))]\n\n return la\n\n\ndef matrix_root_manynull(mat, threshold=1e-16, truncate=True):\n \"\"\"Square root a matrix.\n\n An inefficient alternative to the Cholesky decomposition for a\n matrix with a large dynamic range in eigenvalues. Numerical\n roundoff causes Cholesky to fail as if the matrix were not\n positive semi-definite. This does an explicit eigen-decomposition,\n setting small and negative eigenvalue to zero.\n\n Parameters\n ==========\n mat - ndarray\n An N x N matrix to decompose.\n threshold : scalar, optional\n Set any eigenvalues a factor `threshold` smaller than the\n largest eigenvalue to zero.\n truncate : boolean, optional\n If True (default), truncate the matrix root, to the number of positive\n eigenvalues.\n\n Returns\n =======\n root : ndarray\n The decomposed matrix. This is truncated to the number of\n non-zero eigen values (if truncate is set).\n num_pos : integer\n The number of positive eigenvalues (returned only if truncate is set).\n \"\"\"\n\n # Try to perform a Cholesky first as it's much faster (8x)\n try:\n root = la.cholesky(mat, lower=True)\n num_pos = mat.shape[0]\n\n # If that doesn't work do an eigenvalue and throw out any tiny modes\n except la.LinAlgError:\n evals, evecs = la.eigh(mat)\n\n evals[np.where(evals < evals.max() * threshold)] = 0.0\n num_pos = len(np.flatnonzero(evals))\n\n if truncate:\n evals = evals[np.newaxis, -num_pos:]\n evecs = evecs[:, -num_pos:]\n\n root = evecs * evals[np.newaxis, :] ** 0.5\n\n if truncate:\n return root, num_pos\n else:\n return root\n\n\ndef complex_std_normal(shape):\n \"\"\"Get a set of complex standard normal variables.\n\n Parameters\n ----------\n shape : tuple\n Shape of the array of variables.\n\n Returns\n -------\n var : np.ndarray[shape]\n Complex gaussian variates.\n \"\"\"\n\n return (\n np.random.standard_normal(shape) + 1.0j * np.random.standard_normal(shape)\n ) / 2 ** 0.5\n"
] | [
[
"scipy.linalg.eigh",
"scipy.linalg.cholesky",
"numpy.random.standard_normal",
"numpy.flatnonzero"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
}
] |
z33bs/multi-agent-deep-RL-solves-tennis | [
"0bb9f47905b8213f73e02890f858aea00070b59d"
] | [
"ddpgagent.py"
] | [
"import random\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.optim import Adam\nfrom network import Network\n\nACTION_SIZE = 2\nSTATE_SIZE = 24\nACTOR_HIDDEN_DIMS = [256, 128]\nCRITIC_HIDDEN_DIMS = [512, 256]\nTAU = 1e-3 # For soft-updates of target\nACTOR_LR = 1e-3\nCRITIC_LR = 1e-3\n\n\nclass DDPGAgent():\n def __init__(self, index, num_agents, seed, device):\n random.seed(seed)\n np.random.seed(seed)\n\n self.index = index\n self.device = device\n\n self.actor_local = Network(STATE_SIZE, ACTOR_HIDDEN_DIMS, ACTION_SIZE, torch.tanh, seed)\n self.actor_target = Network(STATE_SIZE, ACTOR_HIDDEN_DIMS, ACTION_SIZE, torch.tanh, seed)\n self.actor_optimizer = Adam(self.actor_local.parameters(), lr=ACTOR_LR)\n self.critic_local = Network(num_agents * (STATE_SIZE + ACTION_SIZE), CRITIC_HIDDEN_DIMS, 1, None, seed)\n self.critic_target = Network(num_agents * (STATE_SIZE + ACTION_SIZE), CRITIC_HIDDEN_DIMS, 1, None, seed)\n self.critic_optimizer = Adam(self.critic_local.parameters(), lr=CRITIC_LR, weight_decay=0)\n\n def act(self, state, random):\n self.actor_local.eval()\n with torch.no_grad():\n action = self.actor_local(torch.from_numpy(state).float().to(self.device)).cpu().data.numpy()\n self.actor_local.train()\n if random is not None:\n action = (1 - random) * action + random * (np.random.rand(ACTION_SIZE) - 0.5) * 2.0\n return np.clip(action, -1, 1)\n\n def learn(self, index, experiences, gamma, all_next_actions, all_actions):\n states, actions, rewards, next_states, dones = experiences\n\n self.critic_optimizer.zero_grad()\n\n index = torch.tensor([index]).to(self.device)\n actions_next = torch.cat(all_next_actions, dim=1).to(self.device)\n with torch.no_grad():\n q_next = self.critic_target(self.critic_input(next_states, actions_next))\n q_exp = self.critic_local(self.critic_input(states, actions))\n q_t = rewards.index_select(1, index) + (gamma * q_next * (1 - dones.index_select(1, index)))\n F.mse_loss(q_exp, q_t.detach()).backward()\n self.critic_optimizer.step()\n\n self.actor_optimizer.zero_grad()\n\n actions_pred = [actions if i == self.index else actions.detach() for i, actions in enumerate(all_actions)]\n actions_pred = torch.cat(actions_pred, dim=1).to(self.device)\n actor_loss = -self.critic_local(self.critic_input(states, actions_pred)).mean()\n actor_loss.backward()\n\n self.actor_optimizer.step()\n\n self.actor_target.soft_update(self.actor_local, TAU)\n self.critic_target.soft_update(self.critic_local, TAU)\n\n def critic_input(self, states, actions):\n return torch.cat((states, actions), dim=1)\n"
] | [
[
"numpy.random.seed",
"torch.cat",
"numpy.clip",
"torch.from_numpy",
"torch.tensor",
"torch.no_grad",
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rajevac/deitel-intro-to-python-exercises | [
"05f427d35ebb4bd315904f6919659335b1bf3fc9"
] | [
"07-Array-Oriented-Programming-with-NumPy/7-1-Filling-Arrays.py"
] | [
"import numpy as np\n\nones = np.ones((2, 3), dtype=int)\nprint(ones)\n\nzeros = np.zeros((3, 3), dtype=int)\nprint(zeros)\n\narr_13 = np.full((2, 5), 13)\nprint(arr_13)\n\n"
] | [
[
"numpy.zeros",
"numpy.full",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gene891212/testAI-for-school | [
"6620b352506c74c7be0b98fcc7ca07a3820b3414"
] | [
"python/predict.py"
] | [
"# The steps implemented in the object detection sample code: \n# 1. for an image of width and height being (w, h) pixels, resize image to (w', h'), where w/h = w'/h' and w' x h' = 262144\n# 2. resize network input size to (w', h')\n# 3. pass the image to network and do inference\n# (4. if inference speed is too slow for you, try to make w' x h' smaller, which is defined with DEFAULT_INPUT_SIZE (in object_detection.py or ObjectDetection.cs))\nimport sys\nimport tensorflow as tf\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\nfrom object_detection import ObjectDetection\n\nMODEL_FILENAME = 'model.tflite'\nLABELS_FILENAME = 'labels.txt'\n\n\nclass TFLiteObjectDetection(ObjectDetection):\n \"\"\"Object Detection class for TensorFlow Lite\"\"\"\n def __init__(self, model_filename, labels):\n super(TFLiteObjectDetection, self).__init__(labels)\n self.interpreter = tf.lite.Interpreter(model_path=model_filename)\n self.interpreter.allocate_tensors()\n self.input_index = self.interpreter.get_input_details()[0]['index']\n self.output_index = self.interpreter.get_output_details()[0]['index']\n\n def predict(self, preprocessed_image):\n inputs = np.array(preprocessed_image, dtype=np.float32)[np.newaxis, :, :, (2, 1, 0)] # RGB -> BGR and add 1 dimension.\n\n # Resize input tensor and re-allocate the tensors.\n self.interpreter.resize_tensor_input(self.input_index, inputs.shape)\n self.interpreter.allocate_tensors()\n \n self.interpreter.set_tensor(self.input_index, inputs)\n self.interpreter.invoke()\n return self.interpreter.get_tensor(self.output_index)[0]\n\n\n\ndef main(image_filename):\n # Load labels\n with open(LABELS_FILENAME, 'r') as f:\n labels = [l.strip() for l in f.readlines()]\n\n od_model = TFLiteObjectDetection(MODEL_FILENAME, labels)\n\n image = Image.open(image_filename)\n predictions = od_model.predict_image(image)\n\n # Draw rectangle\n for pred in predictions:\n if pred['probability'] > .7:\n pred_bound = pred['boundingBox']\n rect_startwith = (pred_bound['left'] * image.width, pred_bound['top'] * image.height)\n pred_shape = [\n rect_startwith, \n (\n rect_startwith[0] + pred_bound['width'] * image.width,\n rect_startwith[1] + pred_bound['height'] * image.height\n )\n ]\n draw_img = ImageDraw.Draw(image)\n draw_img.rectangle(pred_shape, outline='red')\n\n label = [(pred_shape[0][0], pred_shape[0][1] - 15), (pred_shape[1][0], pred_shape[0][1])]\n draw_img.rectangle(label, fill='red')\n font = ImageFont.truetype(\"arial.ttf\", 16)\n draw_img.text((pred_shape[0][0] + 5, pred_shape[0][1] - 15), pred[\"tagName\"], font=font)\n\n print(predictions)\n image.show()\n\n\nif __name__ == '__main__':\n if len(sys.argv) <= 1:\n print('USAGE: {} image_filename'.format(sys.argv[0]))\n else:\n main(sys.argv[1])\n"
] | [
[
"tensorflow.lite.Interpreter",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lseventeen/nnUNet | [
"c4972006383e438de1fa7ed7ae318d09fcffc965"
] | [
"nnunet/inference/predict_simple.py"
] | [
"# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport argparse\nimport torch\n\nfrom nnunet.inference.predict import predict_from_folder\nfrom nnunet.paths import default_plans_identifier, network_training_output_dir, default_cascade_trainer, default_trainer\nfrom batchgenerators.utilities.file_and_folder_operations import join, isdir\nfrom nnunet.utilities.task_name_id_conversion import convert_id_to_task_name\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", '--input_folder', help=\"Must contain all modalities for each patient in the correct\"\n \" order (same as training). Files must be named \"\n \"CASENAME_XXXX.nii.gz where XXXX is the modality \"\n \"identifier (0000, 0001, etc)\", \n default='/home/lwt/data/nnUNet_raw_data_base/nnUNet_raw_data/Task017_AbdominalOrganSegmentation/imagesTs')\n parser.add_argument('-o', \"--output_folder\", \n default='/home/lwt/data/nnUNet_raw_data_base/nnUNet_raw_data/Task017_AbdominalOrganSegmentation/predict',\n help=\"folder for saving predictions\")\n parser.add_argument('-t', '--task_name', help='task name or task ID, required.',\n default=\"17\")\n parser.add_argument(\"-ei\", \"--experiment_id\", required=False, default=\"nnunet100_211115180853\")\n # parser.add_argument(\"-i\", '--input_folder', help=\"Must contain all modalities for each patient in the correct\"\n # \" order (same as training). Files must be named \"\n # \"CASENAME_XXXX.nii.gz where XXXX is the modality \"\n # \"identifier (0000, 0001, etc)\", \n # required=True)\n # parser.add_argument('-o', \"--output_folder\", \n # required=True, \n # help=\"folder for saving predictions\")\n # parser.add_argument('-t', '--task_name', help='task name or task ID, required.',\n # default=default_plans_identifier, required=True)\n parser.add_argument('-tr', '--trainer_class_name',\n help='Name of the nnUNetTrainer used for 2D U-Net, full resolution 3D U-Net and low resolution '\n 'U-Net. The default is %s. If you are running inference with the cascade and the folder '\n 'pointed to by --lowres_segmentations does not contain the segmentation maps generated by '\n 'the low resolution U-Net then the low resolution segmentation maps will be automatically '\n 'generated. For this case, make sure to set the trainer class here that matches your '\n '--cascade_trainer_class_name (this part can be ignored if defaults are used).'\n % default_trainer,\n required=False,\n default=default_trainer)\n parser.add_argument('-ctr', '--cascade_trainer_class_name',\n help=\"Trainer class name used for predicting the 3D full resolution U-Net part of the cascade.\"\n \"Default is %s\" % default_cascade_trainer, required=False,\n default=default_cascade_trainer)\n\n parser.add_argument('-m', '--model', help=\"2d, 3d_lowres, 3d_fullres or 3d_cascade_fullres. 
Default: 3d_fullres\",\n default=\"3d_fullres\", required=False)\n\n parser.add_argument('-p', '--plans_identifier', help='do not touch this unless you know what you are doing',\n default=default_plans_identifier, required=False)\n\n parser.add_argument('-f', '--folds', nargs='+', default=[0],\n help=\"folds to use for prediction. Default is None which means that folds will be detected \"\n \"automatically in the model output folder\")\n # parser.add_argument('-f', '--folds', nargs='+', default='None',\n # help=\"folds to use for prediction. Default is None which means that folds will be detected \"\n # \"automatically in the model output folder\")\n\n parser.add_argument('-z', '--save_npz', required=False, action='store_true',\n help=\"use this if you want to ensemble these predictions with those of other models. Softmax \"\n \"probabilities will be saved as compressed numpy arrays in output_folder and can be \"\n \"merged between output_folders with nnUNet_ensemble_predictions\")\n\n parser.add_argument('-l', '--lowres_segmentations', required=False, default='None',\n help=\"if model is the highres stage of the cascade then you can use this folder to provide \"\n \"predictions from the low resolution 3D U-Net. If this is left at default, the \"\n \"predictions will be generated automatically (provided that the 3D low resolution U-Net \"\n \"network weights are present\")\n\n parser.add_argument(\"--part_id\", type=int, required=False, default=0, help=\"Used to parallelize the prediction of \"\n \"the folder over several GPUs. If you \"\n \"want to use n GPUs to predict this \"\n \"folder you need to run this command \"\n \"n times with --part_id=0, ... n-1 and \"\n \"--num_parts=n (each with a different \"\n \"GPU (for example via \"\n \"CUDA_VISIBLE_DEVICES=X)\")\n\n parser.add_argument(\"--num_parts\", type=int, required=False, default=1,\n help=\"Used to parallelize the prediction of \"\n \"the folder over several GPUs. If you \"\n \"want to use n GPUs to predict this \"\n \"folder you need to run this command \"\n \"n times with --part_id=0, ... n-1 and \"\n \"--num_parts=n (each with a different \"\n \"GPU (via \"\n \"CUDA_VISIBLE_DEVICES=X)\")\n\n parser.add_argument(\"--num_threads_preprocessing\", required=False, default=6, type=int, help=\n \"Determines many background processes will be used for data preprocessing. Reduce this if you \"\n \"run into out of memory (RAM) problems. Default: 6\")\n\n parser.add_argument(\"--num_threads_nifti_save\", required=False, default=2, type=int, help=\n \"Determines many background processes will be used for segmentation export. Reduce this if you \"\n \"run into out of memory (RAM) problems. Default: 2\")\n\n parser.add_argument(\"--disable_tta\", required=False, default=False, action=\"store_true\",\n help=\"set this flag to disable test time data augmentation via mirroring. Speeds up inference \"\n \"by roughly factor 4 (2D) or 8 (3D)\")\n\n parser.add_argument(\"--overwrite_existing\", required=False, default=False, action=\"store_true\",\n help=\"Set this flag if the target folder contains predictions that you would like to overwrite\")\n\n parser.add_argument(\"--mode\", type=str, default=\"normal\", required=False, help=\"Hands off!\")\n parser.add_argument(\"--all_in_gpu\", type=str, default=\"None\", required=False, help=\"can be None, False or True. 
\"\n \"Do not touch.\")\n parser.add_argument(\"--step_size\", type=float, default=0.5, required=False, help=\"don't touch\")\n # parser.add_argument(\"--interp_order\", required=False, default=3, type=int,\n # help=\"order of interpolation for segmentations, has no effect if mode=fastest. Do not touch this.\")\n # parser.add_argument(\"--interp_order_z\", required=False, default=0, type=int,\n # help=\"order of interpolation along z is z is done differently. Do not touch this.\")\n # parser.add_argument(\"--force_separate_z\", required=False, default=\"None\", type=str,\n # help=\"force_separate_z resampling. Can be None, True or False, has no effect if mode=fastest. \"\n # \"Do not touch this.\")\n parser.add_argument('-chk',\n help='checkpoint name, default: model_final_checkpoint',\n required=False,\n default='model_final_checkpoint')\n parser.add_argument('--disable_mixed_precision', default=False, action='store_true', required=False,\n help='Predictions are done with mixed precision by default. This improves speed and reduces '\n 'the required vram. If you want to disable mixed precision you can set this flag. Note '\n 'that yhis is not recommended (mixed precision is ~2x faster!)')\n\n args = parser.parse_args()\n input_folder = args.input_folder\n output_folder = args.output_folder\n part_id = args.part_id\n num_parts = args.num_parts\n folds = args.folds\n save_npz = args.save_npz\n lowres_segmentations = args.lowres_segmentations\n num_threads_preprocessing = args.num_threads_preprocessing\n num_threads_nifti_save = args.num_threads_nifti_save\n disable_tta = args.disable_tta\n step_size = args.step_size\n # interp_order = args.interp_order\n # interp_order_z = args.interp_order_z\n # force_separate_z = args.force_separate_z\n overwrite_existing = args.overwrite_existing\n mode = args.mode\n all_in_gpu = args.all_in_gpu\n model = args.model\n trainer_class_name = args.trainer_class_name\n cascade_trainer_class_name = args.cascade_trainer_class_name\n\n task_name = args.task_name\n\n if not task_name.startswith(\"Task\"):\n task_id = int(task_name)\n task_name = convert_id_to_task_name(task_id)\n\n assert model in [\"2d\", \"3d_lowres\", \"3d_fullres\", \"3d_cascade_fullres\"], \"-m must be 2d, 3d_lowres, 3d_fullres or \" \\\n \"3d_cascade_fullres\"\n\n # if force_separate_z == \"None\":\n # force_separate_z = None\n # elif force_separate_z == \"False\":\n # force_separate_z = False\n # elif force_separate_z == \"True\":\n # force_separate_z = True\n # else:\n # raise ValueError(\"force_separate_z must be None, True or False. Given: %s\" % force_separate_z)\n\n if lowres_segmentations == \"None\":\n lowres_segmentations = None\n\n if isinstance(folds, list):\n if folds[0] == 'all' and len(folds) == 1:\n pass\n else:\n folds = [int(i) for i in folds]\n elif folds == \"None\":\n folds = None\n else:\n raise ValueError(\"Unexpected value for argument folds\")\n\n assert all_in_gpu in ['None', 'False', 'True']\n if all_in_gpu == \"None\":\n all_in_gpu = None\n elif all_in_gpu == \"True\":\n all_in_gpu = True\n elif all_in_gpu == \"False\":\n all_in_gpu = False\n\n # we need to catch the case where model is 3d cascade fullres and the low resolution folder has not been set.\n # In that case we need to try and predict with 3d low res first\n if model == \"3d_cascade_fullres\" and lowres_segmentations is None:\n print(\"lowres_segmentations is None. 
Attempting to predict 3d_lowres first...\")\n assert part_id == 0 and num_parts == 1, \"if you don't specify a --lowres_segmentations folder for the \" \\\n \"inference of the cascade, custom values for part_id and num_parts \" \\\n \"are not supported. If you wish to have multiple parts, please \" \\\n \"run the 3d_lowres inference first (separately)\"\n model_folder_name = join(network_training_output_dir, \"3d_lowres\", task_name, trainer_class_name + \"__\" +\n args.plans_identifier)\n assert isdir(model_folder_name), \"model output folder not found. Expected: %s\" % model_folder_name\n lowres_output_folder = join(output_folder, \"3d_lowres_predictions\")\n predict_from_folder(model_folder_name, input_folder, lowres_output_folder, folds, False,\n num_threads_preprocessing, num_threads_nifti_save, None, part_id, num_parts, not disable_tta,\n overwrite_existing=overwrite_existing, mode=mode, overwrite_all_in_gpu=all_in_gpu,\n mixed_precision=not args.disable_mixed_precision,\n step_size=step_size)\n lowres_segmentations = lowres_output_folder\n torch.cuda.empty_cache()\n print(\"3d_lowres done\")\n\n if model == \"3d_cascade_fullres\":\n trainer = cascade_trainer_class_name\n else:\n trainer = trainer_class_name\n\n model_folder_name = join(network_training_output_dir, model, task_name, trainer + \"__\" +\n args.plans_identifier)\n print(\"using model stored in \", model_folder_name)\n assert isdir(model_folder_name), \"model output folder not found. Expected: %s\" % model_folder_name\n\n predict_from_folder(model_folder_name, input_folder, output_folder, folds, save_npz, num_threads_preprocessing,\n num_threads_nifti_save, lowres_segmentations, part_id, num_parts, not disable_tta,\n overwrite_existing=overwrite_existing, mode=mode, overwrite_all_in_gpu=all_in_gpu,\n mixed_precision=not args.disable_mixed_precision,\n step_size=step_size, checkpoint_name=args.chk,experiment_id=args.experiment_id)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.cuda.empty_cache"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
flaviorangel/test-joss-paper | [
"480eae9c685ed3d96e9c5971d0b9ea1356eaf7d9"
] | [
"ross/tests/test_bearing_seal_element.py"
] | [
"import numpy as np\nimport pytest\nimport os\nfrom numpy.testing import assert_allclose\nimport math\n\nfrom ross.bearing_seal_element import (\n BearingElement,\n BallBearingElement,\n RollerBearingElement,\n)\n\n\[email protected]\ndef bearing0():\n Kxx_bearing = np.array(\n [8.5e07, 1.1e08, 1.3e08, 1.6e08, 1.8e08, 2.0e08, 2.3e08, 2.5e08, 2.6e08]\n )\n Kyy_bearing = np.array(\n [9.2e07, 1.1e08, 1.4e08, 1.6e08, 1.9e08, 2.1e08, 2.3e08, 2.5e08, 2.6e08]\n )\n Cxx_bearing = np.array(\n [226837, 211247, 197996, 185523, 174610, 163697, 153563, 144209, 137973]\n )\n Cyy_bearing = np.array(\n [235837, 211247, 197996, 185523, 174610, 163697, 153563, 144209, 137973]\n )\n wb = np.array([314.2, 418.9, 523.6, 628.3, 733.0, 837.8, 942.5, 1047.2, 1151.9])\n bearing0 = BearingElement(\n 4,\n kxx=Kxx_bearing,\n kyy=Kyy_bearing,\n cxx=Cxx_bearing,\n cyy=Cyy_bearing,\n frequency=wb,\n )\n return bearing0\n\n\ndef test_bearing_interpol_kxx(bearing0):\n assert_allclose(bearing0.kxx.interpolated(314.2), 8.5e7)\n assert_allclose(bearing0.kxx.interpolated(1151.9), 2.6e8)\n\n\ndef test_bearing_interpol_kyy(bearing0):\n assert_allclose(bearing0.kyy.interpolated(314.2), 9.2e7)\n assert_allclose(bearing0.kyy.interpolated(1151.9), 2.6e8)\n\n\ndef test_bearing_interpol_cxx(bearing0):\n assert_allclose(bearing0.cxx.interpolated(314.2), 226837, rtol=1e5)\n assert_allclose(bearing0.cxx.interpolated(1151.9), 137973, rtol=1e5)\n\n\ndef test_bearing_interpol_cyy(bearing0):\n assert_allclose(bearing0.kxx.interpolated(314.2), 235837, rtol=1e5)\n assert_allclose(bearing0.kxx.interpolated(1151.9), 2.6e8, rtol=1e5)\n\n\[email protected]\ndef bearing1():\n # using lists\n Kxx_bearing = [\n 8.5e07,\n 1.1e08,\n 1.3e08,\n 1.6e08,\n 1.8e08,\n 2.0e08,\n 2.3e08,\n 2.5e08,\n 2.6e08,\n ]\n Kyy_bearing = np.array(\n [9.2e07, 1.1e08, 1.4e08, 1.6e08, 1.9e08, 2.1e08, 2.3e08, 2.5e08, 2.6e08]\n )\n Cxx_bearing = np.array(\n [226837, 211247, 197996, 185523, 174610, 163697, 153563, 144209, 137973]\n )\n Cyy_bearing = np.array(\n [235837, 211247, 197996, 185523, 174610, 163697, 153563, 144209, 137973]\n )\n wb = [314.2, 418.9, 523.6, 628.3, 733.0, 837.8, 942.5, 1047.2, 1151.9]\n bearing1 = BearingElement(\n 4,\n kxx=Kxx_bearing,\n kyy=Kyy_bearing,\n cxx=Cxx_bearing,\n cyy=Cyy_bearing,\n frequency=wb,\n )\n return bearing1\n\n\ndef test_index(bearing1):\n assert bearing1.dof_local_index()[0] == 0\n assert bearing1.dof_local_index().x_0 == 0\n assert bearing1.dof_local_index()[1] == 1\n assert bearing1.dof_local_index().y_0 == 1\n assert bearing1.dof_global_index().x_4 == 16\n assert bearing1.dof_global_index().y_4 == 17\n assert bearing1.dof_global_index()[-1] == 17\n\n\ndef test_bearing1_interpol_kxx(bearing1):\n assert_allclose(bearing1.kxx.interpolated(314.2), 8.5e7)\n assert_allclose(bearing1.kxx.interpolated(1151.9), 2.6e8)\n\n\ndef test_bearing1_interpol_kyy(bearing1):\n assert_allclose(bearing1.kyy.interpolated(314.2), 9.2e7)\n assert_allclose(bearing1.kyy.interpolated(1151.9), 2.6e8)\n\n\ndef test_bearing1_interpol_cxx(bearing1):\n assert_allclose(bearing1.cxx.interpolated(314.2), 226837, rtol=1e5)\n assert_allclose(bearing1.cxx.interpolated(1151.9), 137973, rtol=1e5)\n\n\ndef test_bearing1_interpol_cyy(bearing1):\n assert_allclose(bearing1.kxx.interpolated(314.2), 235837, rtol=1e5)\n assert_allclose(bearing1.kxx.interpolated(1151.9), 2.6e8, rtol=1e5)\n\n\ndef test_bearing1_matrices(bearing1):\n # fmt: off\n K = np.array([[85000000.043218, 0. ],\n [ 0. , 91999999.891728]])\n C = np.array([[226836.917649, 0. ],\n [ 0. 
, 235836.850213 ]])\n # fmt: on\n assert_allclose(bearing1.K(314.2), K)\n assert_allclose(bearing1.C(314.2), C)\n\n\ndef test_bearing_error_speed_not_given():\n speed = np.linspace(0, 10000, 5)\n kx = 1e8 * speed\n cx = 1e8 * speed\n with pytest.raises(Exception) as excinfo:\n BearingElement(-1, kxx=kx, cxx=cx)\n assert (\n \"Arguments (coefficients and frequency)\"\n \" must have the same dimension\" in str(excinfo.value)\n )\n\n\ndef test_bearing_error2():\n with pytest.raises(ValueError) as excinfo:\n BearingElement(\n 4, kxx=[7e8, 8e8, 9e8], cxx=[0, 0, 0, 0], frequency=[10, 100, 1000, 10000]\n )\n assert (\n \"Arguments (coefficients and frequency) \"\n \"must have the same dimension\" in str(excinfo.value)\n )\n\n with pytest.raises(ValueError) as excinfo:\n BearingElement(4, kxx=[6e8, 7e8, 8e8, 9e8], cxx=[0, 0, 0, 0, 0])\n assert (\n \"Arguments (coefficients and frequency) \"\n \"must have the same dimension\" in str(excinfo.value)\n )\n\n\[email protected]\ndef bearing_constant():\n bearing = BearingElement(n=4, kxx=8e7, cxx=0)\n return bearing\n\n\ndef test_bearing_constant(bearing_constant):\n assert_allclose(bearing_constant.kxx.interpolated(314.2), 8e7, rtol=1e5)\n assert_allclose(bearing_constant.cxx.interpolated(300.9), 0, rtol=1e5)\n\n\ndef test_bearing_len_2():\n bearing = BearingElement(\n n=0,\n kxx=[481, 4810],\n cxx=[3.13, 10.81],\n kyy=[481, 4810],\n kxy=[194, 2078],\n kyx=[-194, -2078],\n cyy=[3.13, 10.81],\n cxy=[0.276, 0.69],\n cyx=[-0.276, -0.69],\n frequency=[115.19, 345.575],\n )\n assert_allclose(bearing.kxx.interpolated(115.19), 481, rtol=1e5)\n\n\ndef test_bearing_len_3():\n bearing = BearingElement(\n n=0,\n kxx=[481, 4810, 18810],\n cxx=[3.13, 10.81, 22.99],\n kyy=[481, 4810, 18810],\n kxy=[194, 2078, 8776],\n kyx=[-194, -2078, -8776],\n cyy=[3.13, 10.81, 22.99],\n cxy=[0.276, 0.69, 1.19],\n cyx=[-0.276, -0.69, -1.19],\n frequency=[115.19, 345.575, 691.15],\n )\n assert_allclose(bearing.kxx.interpolated(115.19), 481, rtol=1e5)\n\n\ndef test_equality(bearing0, bearing1, bearing_constant):\n assert bearing0 == bearing0\n assert bearing0 == bearing1\n assert not bearing0 == bearing_constant\n assert not bearing0 == 1\n\n\[email protected](\"Waiting for implementation of units conversion for bearings.\")\ndef test_from_table():\n bearing_file = (\n os.path.dirname(os.path.realpath(__file__)) + \"/data/bearing_seal_si.xls\"\n )\n\n bearing = BearingElement.from_table(0, bearing_file)\n assert bearing.n == 0\n assert_allclose(bearing.w[2], 523.5987755985)\n assert_allclose(bearing.kxx.coefficient[2], 53565700)\n\n # bearing with us units\n bearing_file = (\n os.path.dirname(os.path.realpath(__file__)) + \"/data/bearing_seal_us.xls\"\n )\n bearing = BearingElement.from_table(0, bearing_file)\n assert bearing.n == 0\n assert_allclose(bearing.w[2], 523.5987755985)\n assert_allclose(bearing.kxx.coefficient[2], 53565700)\n\n\ndef test_bearing_link_global_index():\n b0 = BearingElement(n=0, n_link=3, kxx=1, cxx=1)\n idx = b0.dof_global_index()\n assert idx.x_0 == 0\n assert idx.y_0 == 1\n print(idx)\n assert idx.x_3 == 12\n assert idx.y_3 == 13\n\n\ndef test_bearing_link_matrices():\n b0 = BearingElement(n=0, n_link=3, kxx=1, cxx=1)\n # fmt: off\n M = np.array(\n [[1, 0, -1, 0],\n [0, 1, 0, -1],\n [-1, 0, 1, 0],\n [0, -1, 0, 1]]\n )\n # fmt: on\n\n assert_allclose(b0.K(0), M)\n assert_allclose(b0.C(0), M)\n\n\ndef test_ball_bearing_element():\n n = 0\n n_balls = 8\n d_balls = 0.03\n fs = 500.0\n alpha = np.pi / 6\n tag = \"ballbearing\"\n ballbearing = 
BallBearingElement(\n n=n, n_balls=n_balls, d_balls=d_balls, fs=fs, alpha=alpha, tag=tag\n )\n\n M = np.zeros((2, 2))\n K = np.array([[4.64168838e07, 0.00000000e00], [0.00000000e00, 1.00906269e08]])\n C = np.array([[580.2110481, 0.0], [0.0, 1261.32836543]])\n G = np.zeros((2, 2))\n\n assert_allclose(ballbearing.M(), M)\n assert_allclose(ballbearing.K(0), K)\n assert_allclose(ballbearing.C(0), C)\n assert_allclose(ballbearing.G(), G)\n\n\ndef test_roller_bearing_element():\n n = 0\n n_rollers = 8\n l_rollers = 0.03\n fs = 500.0\n alpha = np.pi / 6\n tag = \"rollerbearing\"\n rollerbearing = RollerBearingElement(\n n=n, n_rollers=n_rollers, l_rollers=l_rollers, fs=fs, alpha=alpha, tag=tag\n )\n\n M = np.zeros((2, 2))\n K = np.array([[2.72821927e08, 0.00000000e00], [0.00000000e00, 5.56779444e08]])\n C = np.array([[3410.27409251, 0.0], [0.0, 6959.74304593]])\n G = np.zeros((2, 2))\n\n assert_allclose(rollerbearing.M(), M)\n assert_allclose(rollerbearing.K(0), K)\n assert_allclose(rollerbearing.C(0), C)\n assert_allclose(rollerbearing.G(), G)\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
toedtli/rg_text_to_sound | [
"871d51918a34743067fb6fd58534f5eabd28c2f5"
] | [
"playground/beat_toedtli/mymodels/UnifiedKeywordExtractor.py"
] | [
"import numpy as np\n#import tensorflow as tf\n#import tensorflow_hub as hub\n#import tensorflow_text as text # Registers the ops.\nfrom tts_pipeline.pipelines.waterfall.pipeline import (\n WaterfallKeywordExtractor,\n WaterfallEmbedder,\n WaterfallDimensionalityReducer\n)\nimport spacy\nfrom sklearn.cluster import KMeans\nfrom tts_pipeline.core import InferenceModel\n\n\nclass VelocityEstimator(WaterfallKeywordExtractor):\n def __init__(self,model='en_core_web_lg',slow_str='',quick_str=''):\n self.slow_str = slow_str\n self.quick_str = quick_str\n self.model = model\n\n def build(self):\n # Load English tokenizer, tagger, parser and NER\n \n #TODO: Check if it is downloaded, and if needed download it.\n self.nlp = spacy.load(self.model)\n\n if not slow_str:\n slow_str=self.slow_str\n slow_str =\"slow, super slow, snail, unhurried, leisurely, measured, moderate, deliberate, steady, sedate, slow-moving, slow-going, easy, relaxed, unrushed, gentle, undemanding, comfortable, ponderous, plodding, laboured, dawdling, loitering, lagging, laggard, sluggish, sluggardly, snail-like, tortoise-like, leaden-footed, leaden, creeping, laggy, lollygagging, calm, gently, docile, friendly, easy, dull, tediously, lazy, sleepily, tardy, indolent, graceful, largo, adagio, sluggish, relaxed, casual, belatedly, tardily, ritardando, latterly, lately, lenient, poor, dully, lethargically\"\n if not quick_str:\n quick_str=self.quick_str\n quick_str = \"speedy, quick, swift, rapid, brisk, nimble, sprightly, lively, fast-moving, high-speed, turbo, sporty, accelerated, express, flying, whirlwind, blistering, breakneck, pell-mell, meteoric, smart, hasty, hurried, unhesitating, expeditious, fleet-footed,supersonic, fleet, tantivy, alacritous, volant, secure, secured, fastened, tight, firmly fixed, stuck, jammed, immovable, unbudgeable, stiff, closed, shut, to, attach, fasten, fix, affix, join, connect, couple, quickly, rapidly, swiftly, speedily, briskly, at speed, at full speed, at full tilt, energetically, hastily,with all haste, in haste, hurriedly, in a hurry, post-haste, pell-mell, without delay, expeditiously, with dispatch, like a shot, like a flash, in a flash, in the blink of an eye, in a wink, in a trice, in no time, in no time at all, on the double, at the speed of light, like an arrow from a bow, double quick, in double quick time, pretty damn quick, nippily, like lightning, at warp speed, like mad, like crazy, like the wind,\"\n slow_list = [word.strip() for word in slow_str.split(',')]\n quick_list = [word.strip() for word in quick_str.split(',')]\n self.docs_slow = [self.nlp(f'Give me a {token.strip()} guitar. ') for token in slow_list]\n self.docs_quick = [self.nlp(f'Give me a {token.strip()} guitar. 
') for token in quick_list]\n\n def get_mean_similarity(self,doc):\n sim_vals_slow = [doc_c.similarity(doc) for doc_c in self.docs_slow]\n sim_vals_quick = [doc_c.similarity(doc) for doc_c in self.docs_quick]\n xs=np.median(sim_vals_slow)\n xq=np.median(sim_vals_quick)\n return xs,xq\n\n def predict(self, sentence: str) -> dict:\n tokens = sentence.split(\" \")\n lengths = [len(x) for x in tokens]\n max_len_idx = [i for i,l in enumerate(lengths) if l==max(lengths)][0]\n instrument = tokens[max_len_idx]\n\n\n xs,xq = self.get_mean_similarity(self.nlp(sentence))\n velocity = (xs-xq)/0.006\n \n return velocity\n\n def dispose(self):\n del self.nlp\n del self.docs_slow\n del self.docs_quick\n\n\nclass WordToWordsMatcher(WaterfallKeywordExtractor):\n def __init__(self,target_words,model='en_core_web_lg'):\n self.target_words = target_words\n self.model=model\n\n def build(self):\n self.target_tokens = np.array(self.target_words)\n self.nlp = spacy.load(self.model)\n vector_array = self.get_vector_array(target_words)\n\n self.clusterer = KMeans(n_clusters=vector_array.shape[0],init='random')\n self.clusterer.cluster_centers_ = vector_array\n\n def get_vector_array(self,word_list,verbose=False):\n docstr = \" \".join(word_list)\n target_tokens_doc = self.nlp(docstr)\n vector_list = []\n for token in target_tokens_doc:\n if verbose:\n print(token.text, token.has_vector, token.vector_norm, token.is_oov)\n vector_list.append(token.vector)\n return np.array(vector_list)\n\n def match_word_to_words(new_word):\n vector_array = self.get_vector_array(words)\n return clusterer.predict(vector_array.reshape(1,-1))\n\n def predict(self,words):\n \"\"\"\n for a list of words, return a list of target words\n >>>target_words = ['slow', 'quick', 'yellow', 'loud', 'hard']\n >>>wwm = word_to_words_matcher()\n >>>wwm.build(target_words)\n >>>wwm.predict(target_words)\n >>>wwm.predict(['rigid','stiff']).tolist()\n <output still to be checked, hopefully ['hard','hard']>\n \"\"\"\n vector_array = self.get_vector_array(words)\n clusterind = self.clusterer.predict(vector_array)\n return self.target_tokens[clusterind].tolist()\n\n def dispose(self):\n del self.nlp\n\n def test_word_to_words_matcher(self):\n \"\"\"\n Code that might later be used to create tests.\n \"\"\"\n import nltk\n from nltk.corpus import wordnet\n from collections import defaultdict\n target_words = ['slow', 'quick', 'yellow', 'loud', 'hard']\n wwm = word_to_words_matcher()\n wwm.build(target_words)\n\n wwm.predict(target_words)\n\n wwm.predict(['rigid','stiff']).tolist()\n\n \"\"\"# Test the matching on synsets of the target words:\"\"\"\n\n nltk.download('wordnet')\n\n def get_synonyms(word):\n synonyms = []\n for syn in wordnet.synsets(word):\n for lm in syn.lemmas():\n synonyms.append(lm.name())\n return set(synonyms)\n\n ','.join(wwm.predict(get_synonyms('quick')))\n\n good_dict = bad_dict=defaultdict(list)\n\n for target_word in target_words:\n for word in get_synonyms(target_word):\n prediction = wwm.predict([word])[0]\n if prediction==target_word:\n #print(f'{word}->{target_word}:ok!')\n good_dict[target_word]+=[word]\n else:\n #print(word,':',f'{prediction} (should be {target_word})')\n bad_dict[target_word]+=[word]\n\n print('synset words that were not mapped back to the target word:')\n for key,val in bad_dict.items():\n print(key,val)\n\n print('synset words that were correctly mapped back to the target word:')\n for key,val in good_dict.items():\n print(key,val)\n\nclass UnifiedKeywordExtractor(WaterfallKeywordExtractor):\n def 
__init__(self,target_words,slow_str='',quick_str=''):\n self.word_to_words_matcher = WordToWordsMatcher(target_words)\n self.velocity_estimator= VelocityEstimator(slow_str=slow_str,quick_str=quick_str)\n\n def build(self): \n self.word_to_words_matcher.build() \n self.velocity_estimator.build()\n\n def predict(self,sentence):\n #estimate the velocity given in sentence\n velocity = self.velocity_estimator.predict(sentence)\n #somehow generate a list of words given the sentence. This is still a bit too crude.\n #TODO: \n # - stopword removal, \n # - reduction to the relevant words in the sentence, e.g. only nounds or adjectives\n # - use more sophisticated tokenization?\n word_list = sentence.split(' ')\n #match the given word list to the target word list\n matched_words = self.word_to_words_matcher.predict(word_list) \n d['soundquality']=matched_words\n d['velocity']=velocity\n return d\n\n def dispose(self):\n self.word_to_words_matcher.dispose()\n self.velocity_estimator.dispose()\n\nif __name__=='__main__':\n target_words = ['slow', 'quick', 'yellow', 'loud', 'hard']\n uf = UnifiedKeywordExtractor(target_words)\n uf.build()\n \n uf.build(target_words)\n us.predict('give me a slow, dark guitar sound')\n"
] | [
[
"numpy.median",
"numpy.array",
"sklearn.cluster.KMeans"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PierBeneventano/test_pytorch | [
"287437b333c36e27f9ab97ab627a2e0a12683fed"
] | [
"make_makefile_3a.py"
] | [
"import os\nimport numpy as np\ncwd = os.getcwd() # get the current working directory\n\ndef create_makefile(choice_dict):\n f = open(f\"{cwd}/makefile\",'w')\n f.write(\".PHONY = help setup test run clean environment\\n\\n\")\n f.write(\".DEFAULT_GOAL = setup\\n\")\n f.write(\"setup:\\n\")\n\n for batch_size in [64, 128, 1024, 4096]:\n for lr_choices in choice_dict['learning_rate']:\n f.write(f\"\\t@python3 main_synthetic.py --input_gaussian_noise 0.1 --lr {lr_choices} --batchsize {batch_size} --epochs 30\\n\")\n\n\nif __name__ == \"__main__\":\n # creating choice dictionary\n choice_dict = {}\n\n net_choices = np.array(['MLP', 'linear', 'conv'])\n label_noise_choices = np.array([0.5, 0.2, 0.1])\n g_noise_choices = np.array([0.2, 0.1, 0.02])\n noise_sched_choices = np.array(['decay', 'fixed'])\n lr_choices = np.array([0.1, 1])\n\n choice_dict['net'] = net_choices\n choice_dict['label_noise_prob'] = label_noise_choices\n choice_dict['noise_sched'] = noise_sched_choices\n choice_dict['gaussian_noise_sigma'] = g_noise_choices\n choice_dict['learning_rate'] = lr_choices\n\n create_makefile(choice_dict)\n \n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shankhiremath/BT5051_CFA | [
"7b57801bc74ad16ca97663087dd28aa1228efe67"
] | [
"2D_case1.py"
] | [
"#CASE NO. 1\n#Input is 4 adjacent people at the top of the 2D grid\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib.animation import FuncAnimation\n\nprint(\"2D diffusion equation solver\")\n\nppl_length = 10\nmax_iter_time = 150\n\nD = 2 #Diffusivity\n\ndelta_x = 1 #delta_x = delta_y = 1\n\ndelta_t = (delta_x ** 2)/(4 * D) #condition for numerical stability\ngamma = (D * delta_t) / (delta_x ** 2)\n\n# Initialize solution: the grid of u(k, i, j) k = time, i = x, j = y\nu = np.empty((max_iter_time, ppl_length, ppl_length))\n\n# Initial condition everywhere inside the grid\nu_initial = 0\n\nu_max = 100.0\nu_min = 0.0\n\n# Set the initial condition\nu.fill(u_initial)\n\n# Set the boundary conditions\nu[:, (ppl_length-1):, 3:8] = u_max\nu[:, :, :1] = u_min\nu[:, :1, 1:] = u_min\nu[:, :, (ppl_length-1):] = u_min\n\nppl_hist = [] #list to keep track of number of people above a threshold of information density\n\ndef calculate(u, threshold):\n for k in range(0, max_iter_time-1, 1):\n for i in range(1, ppl_length-1, delta_x):\n for j in range(1, ppl_length-1, delta_x):\n u[k + 1, i, j] = gamma * (u[k][i+1][j] + u[k][i-1][j] + u[k][i][j+1] + u[k][i][j-1] - 4*u[k][i][j]) + u[k][i][j]\n h = sum(1 for i in u[k, :, :].flatten() if i>= threshold)\n ppl_hist.append(h)\n\n return u\n\ndef plotdensitymap(u_k, k):\n # Clear the current plot figure\n plt.clf()\n\n plt.title(f\"Case 1: Information map at t = {k} iterations\")\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n\n # This is to plot u_k (u at time-step k)\n plt.pcolormesh(u_k, cmap='jet', vmin=0, vmax=100)\n plt.colorbar()\n\n return plt\n\n# Do the calculation here with a set threshold value\nu = calculate(u, 20)\n\ndef animate(k):\n plotdensitymap(u[k], k)\n\nanim = animation.FuncAnimation(plt.figure(), animate, interval=1, frames=max_iter_time, repeat=False)\nanim.save('/home/shashank/Shashank Hiremath/Sem 5 content/Transport phenomena/My CFA Stuff/Images/case1.gif', writer='imagemagick', fps=100)\n\nplt.show()\n\nplt.figure()\nplt.plot(ppl_hist)\nplt.title(\"The diffusion of information in Case 1\")\nplt.xlabel(\"Iterations\")\nplt.ylabel(\"No. of people above threshold\")\nplt.show()\n\n#Histogram to understand variation in information density values\ndata = u[max_iter_time-1].flatten()\nbins_list = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\nplt.hist(data, bins_list)\nplt.title(\"Histogram of I(x,y, maximum iteration) in Case 1\")\nplt.xlabel(\"I(x,y) value\")\nplt.ylabel(\"No. of people\")\nplt.show()\n\nprint(\"Done!\")\n"
] | [
[
"matplotlib.pyplot.pcolormesh",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GW-Wang-thu/StrainNet3D | [
"cfb235fe03685e3b5bb68c3700b0810a3aa62ca4"
] | [
"Preprocess.py"
] | [
"import numpy as np\nimport cv2\nimport os\nimport re\n\n\ndef gen_dataset(in_dir, out_dir, train_percent, start_idx=300):\n filenames = os.listdir(in_dir)\n filenames = [os.path.join(in_dir, f) for f in filenames if f.endswith('LR.tif')]\n for i in range(len(filenames)):\n print('\\r', '%d of %d finished ' % (i, len(filenames)), end='\\b')\n if i < start_idx:\n continue\n proj_name = filenames[i][-11:]\n k = re.sub(\"\\D\", \"\", proj_name)\n LR_img = cv2.imread(in_dir + str(k) + \"_LR.tif\", cv2.IMREAD_GRAYSCALE)\n LD_img = cv2.imread(in_dir + str(k) + \"_LD.tif\", cv2.IMREAD_GRAYSCALE)\n RR_img = cv2.imread(in_dir + str(k) + \"_RR.tif\", cv2.IMREAD_GRAYSCALE)\n RD_img = cv2.imread(in_dir + str(k) + \"_RD.tif\", cv2.IMREAD_GRAYSCALE)\n Disp_X = np.loadtxt(in_dir + str(k) + \"_Disparity_DX.csv\")\n Disp_Y = np.loadtxt(in_dir + str(k) + \"_Disparity_DY.csv\")\n Flow_X = np.loadtxt(in_dir + str(k) + \"LFU.csv\")\n Flow_Y = np.loadtxt(in_dir + str(k) + \"LFV.csv\")\n LUVW_U = np.loadtxt(in_dir + str(k) + \"_LWU.csv\")\n LUVW_V = np.loadtxt(in_dir + str(k) + \"_LWV.csv\")\n LUVW_W = np.loadtxt(in_dir + str(k) + \"_LWW.csv\")\n\n disp_stackedimg = np.stack([LD_img, RD_img], axis=0)\n flow_stackedimg = np.stack([LR_img, LD_img], axis=0)\n\n disparity_stacked = np.stack([Disp_X, Disp_Y], axis=0)\n opticalflow_stacked = np.stack([Flow_X, Flow_Y], axis=0)\n LUVW_stacked = np.stack([LUVW_U, LUVW_V, LUVW_W], axis=0)\n\n np.random.seed(i)\n if np.random.rand() < train_percent:\n output_dir = out_dir + \"Train/\"\n else:\n output_dir = out_dir + \"Valid/\"\n\n randomnum = np.random.randint(1, 10) * 1000000 + 30000000\n np.save(output_dir + str(randomnum + i * 2 + 1 + start_idx) + \"_LDRD_Imgs.npy\", disp_stackedimg)\n np.save(output_dir + str(randomnum + i * 2 + 1 + start_idx) + \"_LRLD_Imgs.npy\", flow_stackedimg)\n np.save(output_dir + str(randomnum + i * 2 + 1 + start_idx) + \"_Disparity.npy\", disparity_stacked)\n np.save(output_dir + str(randomnum + i * 2 + 1 + start_idx) + \"_LFlow.npy\", opticalflow_stacked)\n np.save(output_dir + str(randomnum + i * 2 + 1 + start_idx) + \"_UVW.npy\", LUVW_stacked)\n\n\n\nif __name__ == '__main__':\n gen_dataset(in_dir=\"D:\\\\Guowen\\\\DLDIC_3D\\\\dataset\\\\data3\\\\\", out_dir=\"D:\\\\Guowen\\\\DLDIC_3D\\\\dataset\\\\\", train_percent=0.85, start_idx=0)\n"
] | [
[
"numpy.random.seed",
"numpy.random.rand",
"numpy.stack",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cvigoe/DRL4MAAS | [
"95539197c9b82a34f9128fd265749d0f8f76157f"
] | [
"rlkit/rlkit/torch/policy_gradient/mdpo.py"
] | [
"\"\"\"\nImplementation of Mirror Descent Policy Optimization.\nhttps://arxiv.org/pdf/1707.06347.pdf\n\nAuthor: Ian Char\nDate: April 10, 2021\n\"\"\"\nfrom collections import OrderedDict, namedtuple\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom rlkit.core.loss import LossFunction, LossStatistics\nfrom torch import nn as nn\n\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.core.eval_util import create_stats_ordered_dict\nfrom rlkit.torch.torch_rl_algorithm import TorchTrainer\nfrom rlkit.core.logging import add_prefix\nimport gtimer as gt\n\nMDPOLosses = namedtuple(\n 'MDPOLosses',\n 'policy_loss val_loss',\n)\n\nclass MDPOTrainer(TorchTrainer, LossFunction):\n def __init__(\n self,\n env,\n policy,\n target_policy,\n val,\n epoch_iterations,\n\n discount=0.99,\n policy_lr=3e-4,\n val_lr=1e-3,\n optimizer_class=optim.Adam,\n ):\n super().__init__()\n self.env = env\n self.policy = policy\n self.target_policy = target_policy\n self.val = val\n self.discount=discount\n self.val_criterion = nn.MSELoss()\n self.epoch_iterations = epoch_iterations\n self.tk = 0\n self.policy_optimizer = optimizer_class(\n self.policy.parameters(),\n lr=policy_lr,\n )\n self.val_optimizer = optimizer_class(\n self.val.parameters(),\n lr=val_lr,\n )\n self._n_train_steps_total = 0\n self._need_to_update_eval_statistics = True\n self.eval_statistics = OrderedDict()\n ptu.soft_update_from_to(\n self.policy, self.target_policy, 0\n )\n\n def train_from_torch(self, batch):\n gt.blank_stamp()\n losses, stats = self.compute_loss(\n batch,\n skip_statistics=not self._need_to_update_eval_statistics,\n )\n \"\"\"\n Update networks\n \"\"\"\n self.policy_optimizer.zero_grad()\n losses.policy_loss.backward()\n self.policy_optimizer.step()\n\n self.val_optimizer.zero_grad()\n losses.val_loss.backward()\n self.val_optimizer.step()\n\n self._n_train_steps_total += 1\n\n if self._need_to_update_eval_statistics:\n self.eval_statistics = stats\n # Compute statistics using only one batch per epoch\n self._need_to_update_eval_statistics = False\n gt.stamp('ppo training', unique=False)\n\n def compute_loss(\n self,\n batch,\n skip_statistics=False,\n ) -> Tuple[MDPOLosses, LossStatistics]:\n obs = batch['observations']\n advantages = batch['advantages']\n targets = batch['targets']\n oldpis = batch['logpis']\n rewards = batch['rewards']\n terminals = batch['terminals']\n actions = batch['actions']\n next_obs = batch['next_observations']\n # Normalize the advanatages.\n advantages = ((advantages - advantages.mean())\n / (advantages.std() + 1e-8))\n # Compute Policy loss.\n dist = self.policy(obs)\n log_pi = dist.log_prob(actions).unsqueeze(-1)\n weighted_adv = (log_pi - oldpis).exp() * advantages\n # Compute the KL Penalty.\n imaginary_acts, im_currpi = dist.rsample_and_logprob()\n olddist = self.target_policy(obs)\n with torch.no_grad():\n im_oldpi = olddist.log_prob(imaginary_acts).unsqueeze(-1)\n kl_div = im_currpi - im_oldpi\n # Combine to get policy loss.\n policy_loss = -torch.mean((weighted_adv\n - min(self.tk / self.epoch_iterations, 1) * kl_div))\n # Compute the value loss.\n val_ests = self.val(obs)\n val_loss = self.val_criterion(val_ests, targets)\n\n \"\"\"\n Save some statistics for eval\n \"\"\"\n eval_statistics = OrderedDict()\n if not skip_statistics:\n eval_statistics['Value Loss'] = np.mean(ptu.get_numpy(val_loss))\n eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(\n policy_loss\n ))\n eval_statistics.update(create_stats_ordered_dict(\n 'Value 
Estimates',\n ptu.get_numpy(val_ests),\n ))\n eval_statistics.update(create_stats_ordered_dict(\n 'Log Pis',\n ptu.get_numpy(log_pi),\n ))\n eval_statistics.update(create_stats_ordered_dict(\n 'KL Div',\n ptu.get_numpy(kl_div),\n ))\n policy_statistics = add_prefix(dist.get_diagnostics(), \"policy/\")\n eval_statistics.update(policy_statistics)\n\n loss = MDPOLosses(\n policy_loss=policy_loss,\n val_loss=val_loss,\n )\n\n return loss, eval_statistics\n\n def get_diagnostics(self):\n stats = super().get_diagnostics()\n stats.update(self.eval_statistics)\n return stats\n\n def end_epoch(self, epoch):\n self._need_to_update_eval_statistics = True\n # Update the target policy completely.\n ptu.soft_update_from_to(\n self.policy, self.target_policy, 0\n )\n self.tk = 0\n\n @property\n def networks(self):\n return [\n self.policy,\n self.val,\n ]\n\n @property\n def optimizers(self):\n return [\n self.val_optimizer,\n self.policy_optimizer,\n ]\n\n def get_snapshot(self):\n return dict(\n policy=self.policy,\n target_policy=self.target_policy,\n val=self.val,\n )\n"
] | [
[
"torch.no_grad",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JordanRex/YAAML | [
"feedfae7f238c326235c1542cf8afcbdf020bf8d"
] | [
"src/algos/logit.py"
] | [
"from sklearn.linear_model import LogisticRegression\n\n# implementing only the baseline logistic model\n# need to add support for parameter tuning\n\n#kfold = model_selection.KFold(n_splits=5, random_state=1)\nmodelCV = LogisticRegression()\n# Create regularization penalty space\npenalty = ['l1', 'l2']\n# Create regularization hyperparameter space\nC = np.logspace(0, 5, 10)\n# Create hyperparameter options\nhyperparameters = dict(C=C, penalty=penalty)\n# Create grid search using 5-fold cross validation\nclf = GridSearchCV(modelCV, hyperparameters, cv=5, verbose=0)\n\n# Fit grid search\nmodel_fit = clf.fit(X_train, y_train)\n\n# View best hyperparameters\nprint('Best Penalty:', model_fit.best_estimator_.get_params()['penalty'])\nprint('Best C:', model_fit.best_estimator_.get_params()['C'])\n\n#scoring = 'recall' # give precision or f1\n#results = model_selection.cross_val_score(modelCV, X_train, y_train, cv=kfold, scoring=scoring)\n#print(\"5-fold cross validation average accuracy: %.3f\" % (results.mean()))\n\n# getting the predictions for the actual test set\nlog_pred = model_fit.best_estimator_.predict_proba(X=X_valid)[:, 1]\nlog_predict = np.where(log_pred > 0.5, 1, 0)\n\n# print the various evaluation metrics\nprint('auc: ', sklearn.metrics.roc_auc_score(y_score=log_pred, y_true=y_valid))\nprint('recall: ', sklearn.metrics.recall_score(y_pred=log_predict, y_true=y_valid))\nprint('precision: ', sklearn.metrics.precision_score(y_pred=log_predict, y_true=y_valid))\nprint('f1: ', sklearn.metrics.f1_score(y_pred=log_predict, y_true=y_valid))"
] | [
[
"sklearn.linear_model.LogisticRegression"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JianhuanZhuo/RecBole | [
"fb9ee722bb1349db113af15b9bf6a14a2bccd0eb"
] | [
"recbole/model/general_recommender/ncl.py"
] | [
"# -*- coding: utf-8 -*-\n\nr\"\"\"\nNCL\n################################################\n\nReference:\n Zihan Lin*, Changxin Tian*, Yupeng Hou*, Wayne Xin Zhao. \"Improving Graph Collaborative Filtering with Neighborhood-enriched Contrastive Learning.\" in WWW 2022.\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn.functional as F\n\nfrom recbole.model.abstract_recommender import GeneralRecommender\nfrom recbole.model.init import xavier_uniform_initialization\nfrom recbole.model.loss import BPRLoss, EmbLoss\nfrom recbole.utils import InputType\n\n\nclass NCL(GeneralRecommender):\n r\"\"\"NCL is a neighborhood-enriched contrastive learning paradigm for graph collaborative filtering.\n Both structural and semantic neighbors are explicitly captured as contrastive learning objects.\n \"\"\"\n input_type = InputType.PAIRWISE\n\n def __init__(self, config, dataset):\n super(NCL, self).__init__(config, dataset)\n\n # load dataset info\n self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32)\n\n # load parameters info\n self.latent_dim = config['embedding_size'] # int type: the embedding size of the base model\n self.n_layers = config['n_layers'] # int type: the layer num of the base model\n self.reg_weight = config['reg_weight'] # float32 type: the weight decay for l2 normalization\n\n self.ssl_temp = config['ssl_temp']\n self.ssl_reg = config['ssl_reg']\n self.hyper_layers = config['hyper_layers']\n\n self.alpha = config['alpha']\n\n self.proto_reg = config['proto_reg']\n self.k = config['num_clusters']\n\n # define layers and loss\n self.user_embedding = torch.nn.Embedding(num_embeddings=self.n_users, embedding_dim=self.latent_dim)\n self.item_embedding = torch.nn.Embedding(num_embeddings=self.n_items, embedding_dim=self.latent_dim)\n\n self.mf_loss = BPRLoss()\n self.reg_loss = EmbLoss()\n\n # storage variables for full sort evaluation acceleration\n self.restore_user_e = None\n self.restore_item_e = None\n\n self.norm_adj_mat = self.get_norm_adj_mat().to(self.device)\n\n # parameters initialization\n self.apply(xavier_uniform_initialization)\n self.other_parameter_name = ['restore_user_e', 'restore_item_e']\n\n self.user_centroids = None\n self.user_2cluster = None\n self.item_centroids = None\n self.item_2cluster = None\n\n def e_step(self):\n user_embeddings = self.user_embedding.weight.detach().cpu().numpy()\n item_embeddings = self.item_embedding.weight.detach().cpu().numpy()\n self.user_centroids, self.user_2cluster = self.run_kmeans(user_embeddings)\n self.item_centroids, self.item_2cluster = self.run_kmeans(item_embeddings)\n\n def run_kmeans(self, x):\n \"\"\"Run K-means algorithm to get k clusters of the input tensor x\n \"\"\"\n import faiss\n kmeans = faiss.Kmeans(d=self.latent_dim, k=self.k, gpu=True)\n kmeans.train(x)\n cluster_cents = kmeans.centroids\n\n _, I = kmeans.index.search(x, 1)\n\n # convert to cuda Tensors for broadcast\n centroids = torch.Tensor(cluster_cents).to(self.device)\n centroids = F.normalize(centroids, p=2, dim=1)\n\n node2cluster = torch.LongTensor(I).squeeze().to(self.device)\n return centroids, node2cluster\n\n def get_norm_adj_mat(self):\n r\"\"\"Get the normalized interaction matrix of users and items.\n\n Construct the square matrix from the training data and normalize it\n using the laplace matrix.\n\n .. 
math::\n A_{hat} = D^{-0.5} \\times A \\times D^{-0.5}\n\n Returns:\n Sparse tensor of the normalized interaction matrix.\n \"\"\"\n # build adj matrix\n A = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32)\n inter_M = self.interaction_matrix\n inter_M_t = self.interaction_matrix.transpose()\n data_dict = dict(zip(zip(inter_M.row, inter_M.col + self.n_users), [1] * inter_M.nnz))\n data_dict.update(dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col), [1] * inter_M_t.nnz)))\n A._update(data_dict)\n # norm adj matrix\n sumArr = (A > 0).sum(axis=1)\n # add epsilon to avoid divide by zero Warning\n diag = np.array(sumArr.flatten())[0] + 1e-7\n diag = np.power(diag, -0.5)\n self.diag = torch.from_numpy(diag).to(self.device)\n D = sp.diags(diag)\n L = D @ A @ D\n # covert norm_adj matrix to tensor\n L = sp.coo_matrix(L)\n row = L.row\n col = L.col\n i = torch.LongTensor(np.array([row, col]))\n data = torch.FloatTensor(L.data)\n SparseL = torch.sparse.FloatTensor(i, data, torch.Size(L.shape))\n return SparseL\n\n def get_ego_embeddings(self):\n r\"\"\"Get the embedding of users and items and combine to an embedding matrix.\n\n Returns:\n Tensor of the embedding matrix. Shape of [n_items+n_users, embedding_dim]\n \"\"\"\n user_embeddings = self.user_embedding.weight\n item_embeddings = self.item_embedding.weight\n ego_embeddings = torch.cat([user_embeddings, item_embeddings], dim=0)\n return ego_embeddings\n\n def forward(self):\n all_embeddings = self.get_ego_embeddings()\n embeddings_list = [all_embeddings]\n for layer_idx in range(max(self.n_layers, self.hyper_layers*2)):\n all_embeddings = torch.sparse.mm(self.norm_adj_mat, all_embeddings)\n embeddings_list.append(all_embeddings)\n\n lightgcn_all_embeddings = torch.stack(embeddings_list[:self.n_layers+1], dim=1)\n lightgcn_all_embeddings = torch.mean(lightgcn_all_embeddings, dim=1)\n\n user_all_embeddings, item_all_embeddings = torch.split(lightgcn_all_embeddings, [self.n_users, self.n_items])\n return user_all_embeddings, item_all_embeddings, embeddings_list\n\n def ProtoNCE_loss(self, node_embedding, user, item):\n user_embeddings_all, item_embeddings_all = torch.split(node_embedding, [self.n_users, self.n_items])\n\n user_embeddings = user_embeddings_all[user] # [B, e]\n norm_user_embeddings = F.normalize(user_embeddings)\n\n user2cluster = self.user_2cluster[user] # [B,]\n user2centroids = self.user_centroids[user2cluster] # [B, e]\n pos_score_user = torch.mul(norm_user_embeddings, user2centroids).sum(dim=1)\n pos_score_user = torch.exp(pos_score_user / self.ssl_temp)\n ttl_score_user = torch.matmul(norm_user_embeddings, self.user_centroids.transpose(0, 1))\n ttl_score_user = torch.exp(ttl_score_user / self.ssl_temp).sum(dim=1)\n\n proto_nce_loss_user = -torch.log(pos_score_user / ttl_score_user).sum()\n\n item_embeddings = item_embeddings_all[item]\n norm_item_embeddings = F.normalize(item_embeddings)\n\n item2cluster = self.item_2cluster[item] # [B, ]\n item2centroids = self.item_centroids[item2cluster] # [B, e]\n pos_score_item = torch.mul(norm_item_embeddings, item2centroids).sum(dim=1)\n pos_score_item = torch.exp(pos_score_item / self.ssl_temp)\n ttl_score_item = torch.matmul(norm_item_embeddings, self.item_centroids.transpose(0, 1))\n ttl_score_item = torch.exp(ttl_score_item / self.ssl_temp).sum(dim=1)\n proto_nce_loss_item = -torch.log(pos_score_item / ttl_score_item).sum()\n\n proto_nce_loss = self.proto_reg * (proto_nce_loss_user + proto_nce_loss_item)\n return proto_nce_loss\n\n 
def ssl_layer_loss(self, current_embedding, previous_embedding, user, item):\n current_user_embeddings, current_item_embeddings = torch.split(current_embedding, [self.n_users, self.n_items])\n previous_user_embeddings_all, previous_item_embeddings_all = torch.split(previous_embedding, [self.n_users, self.n_items])\n\n current_user_embeddings = current_user_embeddings[user]\n previous_user_embeddings = previous_user_embeddings_all[user]\n norm_user_emb1 = F.normalize(current_user_embeddings)\n norm_user_emb2 = F.normalize(previous_user_embeddings)\n norm_all_user_emb = F.normalize(previous_user_embeddings_all)\n pos_score_user = torch.mul(norm_user_emb1, norm_user_emb2).sum(dim=1)\n ttl_score_user = torch.matmul(norm_user_emb1, norm_all_user_emb.transpose(0, 1))\n pos_score_user = torch.exp(pos_score_user / self.ssl_temp)\n ttl_score_user = torch.exp(ttl_score_user / self.ssl_temp).sum(dim=1)\n\n ssl_loss_user = -torch.log(pos_score_user / ttl_score_user).sum()\n\n current_item_embeddings = current_item_embeddings[item]\n previous_item_embeddings = previous_item_embeddings_all[item]\n norm_item_emb1 = F.normalize(current_item_embeddings)\n norm_item_emb2 = F.normalize(previous_item_embeddings)\n norm_all_item_emb = F.normalize(previous_item_embeddings_all)\n pos_score_item = torch.mul(norm_item_emb1, norm_item_emb2).sum(dim=1)\n ttl_score_item = torch.matmul(norm_item_emb1, norm_all_item_emb.transpose(0, 1))\n pos_score_item = torch.exp(pos_score_item / self.ssl_temp)\n ttl_score_item = torch.exp(ttl_score_item / self.ssl_temp).sum(dim=1)\n\n ssl_loss_item = -torch.log(pos_score_item / ttl_score_item).sum()\n\n ssl_loss = self.ssl_reg * (ssl_loss_user + self.alpha * ssl_loss_item)\n return ssl_loss\n\n def calculate_loss(self, interaction):\n # clear the storage variable when training\n if self.restore_user_e is not None or self.restore_item_e is not None:\n self.restore_user_e, self.restore_item_e = None, None\n\n user = interaction[self.USER_ID]\n pos_item = interaction[self.ITEM_ID]\n neg_item = interaction[self.NEG_ITEM_ID]\n\n user_all_embeddings, item_all_embeddings, embeddings_list = self.forward()\n\n center_embedding = embeddings_list[0]\n context_embedding = embeddings_list[self.hyper_layers * 2]\n\n ssl_loss = self.ssl_layer_loss(context_embedding, center_embedding, user, pos_item)\n proto_loss = self.ProtoNCE_loss(center_embedding, user, pos_item)\n\n u_embeddings = user_all_embeddings[user]\n pos_embeddings = item_all_embeddings[pos_item]\n neg_embeddings = item_all_embeddings[neg_item]\n\n # calculate BPR Loss\n pos_scores = torch.mul(u_embeddings, pos_embeddings).sum(dim=1)\n neg_scores = torch.mul(u_embeddings, neg_embeddings).sum(dim=1)\n\n mf_loss = self.mf_loss(pos_scores, neg_scores)\n\n u_ego_embeddings = self.user_embedding(user)\n pos_ego_embeddings = self.item_embedding(pos_item)\n neg_ego_embeddings = self.item_embedding(neg_item)\n\n reg_loss = self.reg_loss(u_ego_embeddings, pos_ego_embeddings, neg_ego_embeddings)\n\n return mf_loss + self.reg_weight * reg_loss, ssl_loss, proto_loss\n\n def predict(self, interaction):\n user = interaction[self.USER_ID]\n item = interaction[self.ITEM_ID]\n\n user_all_embeddings, item_all_embeddings, embeddings_list = self.forward()\n\n u_embeddings = user_all_embeddings[user]\n i_embeddings = item_all_embeddings[item]\n scores = torch.mul(u_embeddings, i_embeddings).sum(dim=1)\n return scores\n\n def full_sort_predict(self, interaction):\n user = interaction[self.USER_ID]\n if self.restore_user_e is None or self.restore_item_e is 
None:\n self.restore_user_e, self.restore_item_e, embedding_list = self.forward()\n # get user embedding from storage variable\n u_embeddings = self.restore_user_e[user]\n\n # dot with all item embedding to accelerate\n scores = torch.matmul(u_embeddings, self.restore_item_e.transpose(0, 1))\n\n return scores.view(-1)\n"
] | [
[
"torch.mean",
"torch.cat",
"torch.sparse.mm",
"torch.nn.Embedding",
"torch.FloatTensor",
"torch.split",
"scipy.sparse.coo_matrix",
"torch.Size",
"scipy.sparse.dok_matrix",
"scipy.sparse.diags",
"torch.from_numpy",
"torch.mul",
"torch.LongTensor",
"numpy.power",
"torch.exp",
"torch.log",
"torch.stack",
"numpy.array",
"torch.nn.functional.normalize",
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
Diemo-zz/KiezBurnTokenRedistribution | [
"2ae141cfb7984c9e7a032b616921d38462f409b1"
] | [
"scrape_budget3.py"
] | [
"from selenium import webdriver\nfrom time import sleep\nimport pandas as pd\nfrom DreamsList import DreamList\n\n\ndef load_all_dreams(driver):\n sleep(5)\n def find_button(driver):\n mt7 = driver.find_elements_by_xpath(\"//button\")\n button = None\n for b in mt7:\n divs = b.find_elements_by_xpath(\".//div\")\n if divs:\n button = b\n return button\n\n while True:\n button = find_button(driver)\n if button is None:\n break\n button.click()\n sleep(1)\n\n\ndef find_all_dream_links(driver):\n hrefs = driver.find_elements_by_xpath(\"//a[@href]\")\n hrefs = [e.get_attribute('href') for e in hrefs]\n hrefs = [e for e in hrefs if e.startswith(\"https://kiezburn.dreams.wtf/kb21/60\")]\n return hrefs\n\n\ndef find_dreamers(driver):\n els = driver.find_elements_by_xpath(\"//div[@class='mt-5 space-y-5']\")\n els = els[0]\n k = els.find_elements_by_xpath(\".//div[@User]\")\n users = [k1.get_attribute('alt') for k1 in k]\n return users\n\ndef find_funded(driver):\n paragraph = driver.find_elements_by_xpath(\"//p[text()='EUR']\")\n votes = None\n for p in paragraph:\n split = p.text.split()\n split = combine_numbers(split)\n if len(split) == 2:\n votes = float(split[0])\n return votes\n\n\ndef combine_numbers(split):\n to_remove = []\n if len(split) != 4:\n for index, (n, one) in enumerate(zip(split[:-1], split[1:])):\n try:\n float(n)\n float(one)\n success = True\n except:\n success = False\n if success:\n new_number = n + one\n split[index] = new_number\n to_remove.append(one)\n split = list(filter(lambda x: x not in to_remove, split))\n return split\n\n\ndef calculate_budget(table):\n cells = table.find_elements_by_xpath(\".//td\")\n all_min = 0\n all_max = 0\n for cell in cells:\n txt = cell.text\n split = txt.split()\n if any(x == \"EUR\" for x in split):\n split = combine_numbers(split)\n minimum = float(split[0])\n try:\n maximum = float(split[2])\n except IndexError:\n maximum = minimum\n all_min += minimum\n all_max += maximum\n return all_min, all_max\n\n\ndef calculate_prefunding(table):\n cells = table.find_elements_by_xpath(\".//td\")\n prefund = 0\n for cell in cells:\n text = cell.text\n split = text.split()\n if any(x==\"EUR\" for x in split):\n split = combine_numbers(split)\n prefund += float(split[0])\n return prefund\n\n\ndef find_budget(driver):\n votes = find_funded(driver)\n tables = driver.find_elements_by_xpath(\"//table\")\n if len(tables) == 1:\n all_min, all_max = calculate_budget(tables[0])\n return {\"minimum_budget\": all_min, \"maximum_budget\": all_max, \"preexisting_funding\": 0, \"total_funding\": votes}\n\n elif len(tables) == 2:\n all_min, all_max = calculate_budget(tables[0])\n pre = calculate_prefunding(tables[1])\n return {\"minimum_budget\": all_min, \"maximum_budget\": all_max, \"preexisting_funding\": pre, \"total_funding\": votes}\n else:\n return {\"minimum_budget\": 0, \"maximum_budget\": 0, \"preexisting_funding\": 0, \"total_funding\": 0}\n\ndef main(driver):\n driver.get(\"https://kiezburn.dreams.wtf/kb21\")\n load_all_dreams(driver)\n hrefs = find_all_dream_links(driver)\n all_dreams = []\n for h in hrefs:\n driver.get(h)\n sleep(1)\n this_dream = {}\n this_dream['link'] = h\n this_dream['name'] = find_name(driver)\n print(this_dream.get('name'))\n this_dream['dreamers'] = find_dreamers(driver)\n this_dream.update(find_budget(driver))\n all_dreams.append(this_dream)\n return all_dreams\n\n\ndef find_name(driver):\n headers = driver.find_elements_by_xpath(\"//div/h1\")\n return headers[0].text\n\nif __name__ == \"__main__\":\n driver = 
webdriver.Chrome()\n #url = \"https://kiezburn.dreams.wtf/kb21/60c9aed9551867002ccd8fcb\"\n #driver.get(url)\n #find_name(driver)\n #dreamers = find_budget(driver)\n #print(dreamers)\n all_dreams = main(driver)\n df = pd.DataFrame(all_dreams)\n print(df.head())\n import pickle\n with open(\"dumped_df_with_names.pickle\", \"wb\") as f:\n pickle.dump(df, f)\n ##d_list = DreamList.from_dataframe(df)\n driver.close()\n\n\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
lsgai/selene | [
"ad23904cad2a5a292732ff350e7689c0b9e511f4"
] | [
"selene_sdk/predict/_variant_effect_prediction.py"
] | [
"import math\n\nimport numpy as np\n\nfrom ._common import _truncate_sequence\nfrom ._common import predict\n\n\nVCF_REQUIRED_COLS = [\"#CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\"]\n\n\n# TODO: Is this a general method that might belong in utils?\ndef read_vcf_file(input_path,\n strand_index=None,\n require_strand=False,\n output_NAs_to_file=None,\n seq_context=None,\n reference_sequence=None):\n \"\"\"\n Read the relevant columns for a variant call format (VCF) file to\n collect variants for variant effect prediction.\n\n Parameters\n ----------\n input_path : str\n Path to the VCF file.\n strand_index : int or None, optional\n Default is None. By default we assume the input sequence\n surrounding a variant is on the forward strand. If your\n model is strand-specific, you may specify a column number\n (0-based) in the VCF file that includes strand information. Please\n note that variant position, ref, and alt should still be specified\n for the forward strand and Selene will apply reverse complement\n to this variant.\n require_strand : bool, optional\n Default is False. Whether strand can be specified as '.'. If False,\n Selene accepts strand value to be '+', '-', or '.' and automatically\n treats '.' as '+'. If True, Selene skips any variant with strand '.'.\n This parameter assumes that `strand_index` has been set.\n output_NAs_to_file : str or None, optional\n Default is None. Only used if `reference_sequence` and `seq_context`\n are also not None. Specify a filepath to which invalid variants are\n written. Invalid = sequences that cannot be fetched, either because\n the exact chromosome cannot be found in the `reference_sequence` FASTA\n file or because the sequence retrieved based on the specified\n `seq_context` is out of bounds or overlapping with blacklist regions.\n seq_context : int or tuple(int, int) or None, optional\n Default is None. Only used if `reference_sequence` is not None.\n Specifies the sequence context in which the variant is centered.\n `seq_context` accepts a tuple of ints specifying the start and end\n radius surrounding the variant position or a single int if the\n start and end radius are the same length.\n reference_sequence : selene_sdk.sequences.Genome or None, optional\n Default is None. Only used if `seq_context` is not None.\n The reference genome.\n\n Returns\n -------\n list(tuple)\n List of variants. Tuple = (chrom, position, id, ref, alt, strand)\n\n \"\"\"\n variants = []\n na_rows = []\n with open(input_path, 'r') as file_handle:\n lines = file_handle.readlines()\n index = 0\n for index, line in enumerate(lines):\n if '#' not in line:\n break\n if \"#CHROM\" in line:\n cols = line.strip().split('\\t')\n if cols[:5] != VCF_REQUIRED_COLS:\n raise ValueError(\n \"First 5 columns in file {0} were {1}. 
\"\n \"Expected columns: {2}\".format(\n input_path, cols[:5], VCF_REQUIRED_COLS))\n index += 1\n break\n for line in lines[index:]:\n cols = line.strip().split('\\t')\n if len(cols) < 5:\n na_rows.append(line)\n continue\n chrom = str(cols[0])\n if 'CHR' == chrom[:3]:\n chrom = chrom.replace('CHR', 'chr')\n elif \"chr\" not in chrom:\n chrom = \"chr\" + chrom\n\n if chrom == \"chrMT\" and \\\n chrom not in reference_sequence.get_chrs():\n chrom = \"chrM\"\n\n pos = int(cols[1])\n name = cols[2]\n ref = cols[3]\n if ref == '-':\n ref = \"\"\n alt = cols[4]\n strand = '+'\n if strand_index is not None:\n if require_strand and cols[strand_index] == '.':\n na_rows.append(line)\n continue\n elif cols[strand_index] == '-':\n strand = '-'\n\n if reference_sequence and seq_context:\n if isinstance(seq_context, int):\n seq_context = (seq_context, seq_context)\n lhs_radius, rhs_radius = seq_context\n start = pos + len(ref) // 2 - lhs_radius\n end = pos + len(ref) // 2 + rhs_radius\n if not reference_sequence.coords_in_bounds(chrom, start, end):\n na_rows.append(line)\n continue\n for a in alt.split(','):\n variants.append((chrom, pos, name, ref, a, strand))\n\n if reference_sequence and seq_context and output_NAs_to_file:\n with open(output_NAs_to_file, 'w') as file_handle:\n for na_row in na_rows:\n file_handle.write(na_row)\n return variants\n\n\ndef _get_ref_idxs(seq_len, ref_len):\n mid = seq_len // 2\n if seq_len % 2 == 0:\n mid -= 1\n start_pos = mid - ref_len // 2\n end_pos = start_pos + ref_len\n return (start_pos, end_pos)\n\n\ndef _process_alt(chrom,\n pos,\n ref,\n alt,\n start,\n end,\n wt_sequence,\n reference_sequence):\n \"\"\"\n Return the encoded sequence centered at a given allele for input into\n the model.\n\n Parameters\n ----------\n chrom : str\n The chromosome the variant is in\n pos : int\n The position of the variant\n ref : str\n The reference allele of the variant\n alt : str\n The alternate allele\n start : int\n The start coordinate for genome query\n end : int\n The end coordinate for genome query\n wt_sequence : numpy.ndarray\n The reference sequence encoding\n reference_sequence : selene_sdk.sequences.Sequence\n The reference sequence Selene queries to retrieve the model input\n sequences based on variant coordinates.\n\n Returns\n -------\n list(numpy.ndarray)\n A list of the encoded sequences containing alternate alleles at\n the center\n\n \"\"\"\n if alt == '*' or alt == '-': # indicates a deletion\n alt = ''\n ref_len = len(ref)\n alt_len = len(alt)\n if alt_len > len(wt_sequence):\n sequence = _truncate_sequence(alt, len(wt_sequence))\n return reference_sequence.sequence_to_encoding(\n sequence)\n\n alt_encoding = reference_sequence.sequence_to_encoding(alt)\n if ref_len == alt_len: # substitution\n start_pos, end_pos = _get_ref_idxs(len(wt_sequence), ref_len)\n sequence = np.vstack([wt_sequence[:start_pos, :],\n alt_encoding,\n wt_sequence[end_pos:, :]])\n return sequence\n elif alt_len > ref_len: # insertion\n start_pos, end_pos = _get_ref_idxs(len(wt_sequence), ref_len)\n sequence = np.vstack([wt_sequence[:start_pos, :],\n alt_encoding,\n wt_sequence[end_pos:, :]])\n trunc_s = (len(sequence) - wt_sequence.shape[0]) // 2\n trunc_e = trunc_s + wt_sequence.shape[0]\n sequence = sequence[trunc_s:trunc_e, :]\n return sequence\n else: # deletion\n lhs = reference_sequence.get_sequence_from_coords(\n chrom,\n start - ref_len // 2 + alt_len // 2,\n pos + 1,\n pad=True)\n rhs = reference_sequence.get_sequence_from_coords(\n chrom,\n pos + 1 + ref_len,\n end + 
math.ceil(ref_len / 2.) - math.ceil(alt_len / 2.),\n pad=True)\n sequence = lhs + alt + rhs\n return reference_sequence.sequence_to_encoding(\n sequence)\n\n\ndef _handle_standard_ref(ref_encoding,\n seq_encoding,\n seq_length,\n reference_sequence):\n ref_len = ref_encoding.shape[0]\n\n start_pos, end_pos = _get_ref_idxs(seq_length, ref_len)\n\n sequence_encoding_at_ref = seq_encoding[\n start_pos:start_pos + ref_len, :]\n references_match = np.array_equal(\n sequence_encoding_at_ref, ref_encoding)\n\n sequence_at_ref = None\n if not references_match:\n sequence_at_ref = reference_sequence.encoding_to_sequence(\n sequence_encoding_at_ref)\n seq_encoding[start_pos:start_pos + ref_len, :] = \\\n ref_encoding\n return references_match, seq_encoding, sequence_at_ref\n\n\ndef _handle_long_ref(ref_encoding,\n seq_encoding,\n start_radius,\n end_radius,\n reference_sequence):\n ref_len = ref_encoding.shape[0]\n sequence_encoding_at_ref = seq_encoding\n ref_start = ref_len // 2 - start_radius - 1\n ref_end = ref_len // 2 + end_radius - 1\n ref_encoding = ref_encoding[ref_start:ref_end]\n references_match = np.array_equal(\n sequence_encoding_at_ref, ref_encoding)\n\n sequence_at_ref = None\n if not references_match:\n sequence_at_ref = reference_sequence.encoding_to_sequence(\n sequence_encoding_at_ref)\n seq_encoding = ref_encoding\n return references_match, seq_encoding, sequence_at_ref\n\n\ndef _handle_ref_alt_predictions(model,\n batch_ref_seqs,\n batch_alt_seqs,\n batch_ids,\n reporters,\n use_cuda=False):\n \"\"\"\n Helper method for variant effect prediction. Gets the model\n predictions and updates the reporters.\n\n Parameters\n ----------\n model : torch.nn.Sequential\n The model, on mode `eval`.\n batch_ref_seqs : list(np.ndarray)\n One-hot encoded sequences with the ref base(s).\n batch_alt_seqs : list(np.ndarray)\n One-hot encoded sequences with the alt base(s).\n reporters : list(PredictionsHandler)\n List of prediction handlers.\n use_cuda : bool, optional\n Default is `False`. Specifies whether CUDA-enabled GPUs are available\n for torch to use.\n\n\n Returns\n -------\n None\n\n \"\"\"\n batch_ref_seqs = np.array(batch_ref_seqs)\n batch_alt_seqs = np.array(batch_alt_seqs)\n ref_outputs = predict(model, batch_ref_seqs, use_cuda=use_cuda)\n alt_outputs = predict(model, batch_alt_seqs, use_cuda=use_cuda)\n for r in reporters:\n if r.needs_base_pred:\n r.handle_batch_predictions(alt_outputs, batch_ids, ref_outputs)\n else:\n r.handle_batch_predictions(alt_outputs, batch_ids)\n"
] | [
[
"numpy.vstack",
"numpy.array",
"numpy.array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zaustinj33/SysAnalysisRNABS | [
"730014b1a9ab5fd869bc6a265dfe369421c74f8e"
] | [
"Figure_4/plot_genes_sites_from_meRanCall.py"
] | [
"import glob\nimport pandas as pd\nimport os, sys, re\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n#%%\n\"\"\"\nAccumulates 1) unique genes 2) unique sites counts for all samples. Plots the output.\n\"\"\"\nheaders = ['#SeqID', 'refPos', 'strand', 'Base', 'cov', 'C_count','methRate']\nmethRate_counts = {}\nmethRate_counts_filtered = {}\ngene_counts = {}\nsites_per_gene = {}\nfor file in glob.glob(\"**/*_Genome10xCall_annotate.txt\", recursive=True): #*_Genome10xCall*_annotate.txt\", recursive=True):\n name = re.sub(\"_Genome10xCall_annotate.txt\", \"\", os.path.basename(file))\n print(file)\n df = pd.read_csv(file, sep='\\t', low_memory=False) # file\n #df = df.iloc[:,:7]\n #df.columns = headers\n\n #1) basic filter\n df_filter = df[((df['cov'] >= 20) & (df['methRate'] >= 0.1) & (df['C_count'] >= 3))]\n sites_per_gene_sample = df.gene.value_counts()\n #2) genes per library\n gene_counts[name] = len(sites_per_gene_sample)\n #3) sites per gene\n sites_per_gene[name] = sites_per_gene_sample\n #4) methylation rate\n methRate_counts[name] = df['methRate']\n methRate_counts_filtered[name] = df_filter['methRate']\n\n#%%\norder_x = ['G1', 'G2', 'G3', 'G4', 'MF', 'pMF', 'SRR']\n\n# plot genes per sample\ngene_counts_df = pd.DataFrame(gene_counts.items(), columns=['name', 'counts'])\ngene_counts_df['group'] = gene_counts_df['name'].str.replace(r'817.*$|_.*$', '') # add levels\n\nsns.barplot(data=gene_counts_df, x='group', y='counts', palette='bright', order=order_x)\nsns.swarmplot(data=gene_counts_df, x='group', y='counts', color='black', order=order_x)\n\nplt.xticks(rotation=90)\nplt.savefig(\"genes_per_lib.png\", bbox_inches='tight', dpi=400, transparent=True)\nplt.show()\nplt.close()\n#%%\n\n# plot sites per gene\nsites_df = pd.DataFrame(sites_per_gene)\nsites_melt = sites_df.melt()\n#%%\nindex_order = ['G1', 'G2','G3', 'G4', 'MF_rep1',\n 'MF_rep2', 'pMF_rep1',\n 'pMF_rep2', 'SRR8170377','SRR8170378', 'SRR8170379', 'SRR8170380']\n\nsns.violinplot(data=sites_melt, x='variable', y='value', color='black', order=index_order,\n cut=0) # inner='point'\nplt.xticks(rotation=90)\nplt.yscale('log')\nplt.savefig(\"sites_per_gene.png\", bbox_inches='tight', dpi=400, transparent=True)\nplt.show()\nplt.close()\n\n#%%\n#methRate_df\nmethRate_df = pd.DataFrame(methRate_counts).melt()\nmethRate_df['group'] = methRate_df['variable'].str.replace(r'817.*$|_.*$', '')\n#%%\nmethRate_df_filtered = pd.DataFrame(methRate_counts_filtered).melt().dropna()\nmethRate_df_filtered['group'] = methRate_df_filtered['variable'].str.replace(r'817.*$|_.*$', '')\n\n#%%\npalette_bright = sns.color_palette(\"bright\",12)\ncolor_order = [1,2,3,4,6,9,10]\ncolors = [palette_bright[i] for i in color_order]\n#%%\n# culmative coverage plot; methRate_df_filtered or methRate_df_filtered\nplt.axvline(0.1, linestyle='--', color='black')\nsns.ecdfplot(x='value', data=methRate_df, hue='group',#, weights='cov',\n palette=colors)\nplt.savefig(\"Culm_rate_all.png\", bbox_inches='tight', dpi=400, transparent=True)\nplt.show()\nplt.close()\n#%%\nplt.axvline(0.1, linestyle='--', color='black')\nsns.ecdfplot(x='value', data=methRate_df_filtered, hue='group',#, weights='cov',\n palette=colors)\nplt.xlim([0,1])\n#plt.legend(loc='upper left', bbox_to_anchor=(1.05, 1))\n\nplt.savefig(\"Culm_rate_all_filtered.png\", bbox_inches='tight', dpi=400, transparent=True)\nplt.show()\nplt.close()\n\n"
] | [
[
"matplotlib.pyplot.axvline",
"pandas.read_csv",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
robertoxmed/ls_mxc | [
"5b62fcf35592f90cd239390e14149c28b97b8692"
] | [
"bench/bench.py"
] | [
"#!/usr/bin/python\nimport numpy as np\nimport os\nimport optparse\nimport sys\nimport shutil\nimport textwrap\nimport smtplib\nimport time\nimport matplotlib.pyplot as plt\nimport csv\nimport zipfile\nimport tempfile\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nfrom _ast import With\n\n# Global setup for generation and benchmarks\nglobal number_levels\nglobal number_tasks\nglobal number_dags\nglobal number_cores\nglobal edge_percentage\n\nnumber_levels = [2]\nnumber_tasks = [30]\nnumber_dags = [1]\nnumber_cores = [8]\nedge_percentage = [40]\nnumber_jobs = 16\nnumber_files = \"10\"\n\n# Global setup for matplotlib\nschedulers = [\"llf\", \"edf\", \"ezl\"]\ncolors = ['b', 'g', 'r']\n\ndef create_setup():\n # Create the directory tree for generation\n if not os.path.exists(\"genned\"):\n os.makedirs(\"genned\")\n \n for l in number_levels:\n if not os.path.exists(\"genned/l\"+str(l)):\n os.makedirs(\"genned/l\"+str(l))\n\n for c in number_cores:\n if not os.path.exists(\"genned/l\"+str(l)+\"/c\"+str(c)):\n os.makedirs(\"genned/l\"+str(l)+\"/c\"+str(c))\n\n for p in edge_percentage:\n if not os.path.exists(\"genned/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)):\n os.makedirs(\"genned/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p))\n\n for d in number_dags:\n if not os.path.exists(\"genned/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)):\n os.makedirs(\"genned/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d))\n\n for t in number_tasks:\n if not os.path.exists(\"genned/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)):\n os.makedirs(\"genned/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t))\n\n # Create the directory tree for benchmarking\n if not os.path.exists(\"results\"):\n os.makedirs(\"results\")\n\n for l in number_levels:\n if not os.path.exists(\"results/l\"+str(l)):\n os.makedirs(\"results/l\"+str(l))\n \n for c in number_cores:\n if not os.path.exists(\"results/l\"+str(l)+\"/c\"+str(c)):\n os.makedirs(\"results/l\"+str(l)+\"/c\"+str(c))\n\n for p in edge_percentage:\n if not os.path.exists(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)):\n os.makedirs(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p))\n\n for d in number_dags:\n if not os.path.exists(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)):\n os.makedirs(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d))\n\n for t in number_tasks:\n if not os.path.exists(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)):\n os.makedirs(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t))\n if not os.path.exists(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)+\"/detail\"):\n os.makedirs(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)+\"/detail\")\n\n print(\"MC-DAG script > Finished setup!\")\n\ndef clean_generated():\n for l in number_levels:\n for c in number_cores:\n for p in edge_percentage:\n for d in number_dags:\n for t in number_tasks:\n # Create the folder string\n folder = str(\"genned/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t))\n for file in os.listdir(folder):\n file_path = os.path.join(folder, file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n\n print(\"MC-DAG script > All generated files have been cleaned!\\n\")\n\ndef generate():\n \n for l in number_levels:\n for c in number_cores:\n for p in 
edge_percentage:\n for d in number_dags:\n for t in number_tasks:\n # Vary utilization\n low_bound = c /4\n step = c * 0.025\n upper_bound = c + step\n\n for u in np.arange(low_bound, upper_bound, step):\n cmd = \"java -jar bin/generator.jar -mu \"+str(round(u,2))+\"\\\n -nd \"+str(d)+\" -l \"+str(l)+\" -nt \"+str(t)+\" -nf \"+number_files+\" -e \"+str(p)+\"\\\n -o genned/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)+\"/test-\"+str(round(u,2))+\".xml\\\n -p 1 -j \"+str(number_jobs)\n\n ret = os.system(cmd)\n if ret != 0:\n print(\"MC-DAG script > ERROR unexpected behavior for the generation. Exiting...\")\n return -1\n\ndef benchmark():\n \n for l in number_levels:\n for c in number_cores:\n for p in edge_percentage:\n for d in number_dags:\n for t in number_tasks:\n low_bound = c /4\n step = c * 0.025\n upper_bound = c + step\n\n for u in np.arange(low_bound, upper_bound, step):\n cmd = \"java -jar bin/benchmark.jar -i genned/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)+\"/test-\"+str(round(u,2))+\"*.xml\\\n -o results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)+\"/detail/out-\"+str(round(u,2))+\".csv \\\n -ot results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)+\"/out-l\"+str(l)+\"-c-\"+str(c)+\"-e\"+str(p)+\"-\"+str(d)+\"-\"+str(t)+\"-total.csv\\\n -u \"+str(round(u,2))+\" -c \"+str(c)+\" -l \"+str(l)+\" -j \"+str(number_jobs)\n ret = os.system(cmd)\n if ret != 0:\n print(\"ERROR unexpected behavior for the benchmarking. Exiting...\")\n return -1\n\ndef plot():\n for l in number_levels:\n for c in number_cores:\n for p in edge_percentage:\n for d in number_dags:\n for t in number_tasks:\n x = []\n llf = []\n edf = []\n ezl = []\n fedllf = []\n fededf = []\n fedezl = []\n\n with open(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)+\"/out-l\"+str(l)+\"-c-\"+str(c)+\"-e\"+str(p)+\"-\"+str(d)+\"-\"+str(t)+\"-total.csv\", 'r') as csvfile:\n plots = csv.reader(csvfile, delimiter=',')\n for row in plots:\n x.append(float(row[0])/c)\n llf.append(float(row[1]))\n edf.append(float(row[5]))\n ezl.append(float(row[9]))\n fedllf.append(float(row[10]))\n fededf.append(float(row[11]))\n fedezl.append(float(row[12]))\n \n # Calculate polynomial approximations\n llf_z = np.polyfit(x, llf, 5)\n llf_f = np.poly1d(llf_z)\n llf_x_new = np.linspace(x[0], x[-1], 50)\n llf_y_new = llf_f(llf_x_new)\n \n edf_z = np.polyfit(x, edf, 5)\n edf_f = np.poly1d(edf_z)\n edf_x_new = np.linspace(x[0], x[-1], 50)\n edf_y_new = edf_f(edf_x_new)\n \n ezl_z = np.polyfit(x, ezl, 5)\n ezl_f = np.poly1d(ezl_z)\n ezl_x_new = np.linspace(x[0], x[-1], 50)\n ezl_y_new = ezl_f(ezl_x_new)\n \n fedllf_z = np.polyfit(x, fedllf, 5)\n fedllf_f = np.poly1d(fedllf_z)\n fedllf_x_new = np.linspace(x[0], x[-1], 50)\n fedllf_y_new = fedllf_f(fedllf_x_new)\n \n fededf_z = np.polyfit(x, fededf, 5)\n fededf_f = np.poly1d(fededf_z)\n fededf_x_new = np.linspace(x[0], x[-1], 50)\n fededf_y_new = fededf_f(fededf_x_new)\n \n fedezl_z = np.polyfit(x, fedezl, 5)\n fedezl_f = np.poly1d(fedezl_z)\n fedezl_x_new = np.linspace(x[0], x[-1], 50)\n fedezl_y_new = fedezl_f(fedezl_x_new)\n \n plt.figure() \n plt.plot(x,llf, 'b.')\n plt.plot(llf_x_new, llf_y_new, 'b', label='LLF')\n plt.plot(x,edf, 'gd', markersize=4)\n plt.plot(edf_x_new, edf_y_new, 'g', label='EDF')\n plt.plot(x,ezl, 'rs', markersize=3)\n plt.plot(ezl_x_new, ezl_y_new, 'r', label='EZL')\n \n plt.plot(x,fedllf, 'b.')\n plt.plot(fedllf_x_new, fedllf_y_new, 'b--', label='FED-LLF')\n 
plt.plot(x,fededf, 'gd', markersize=4)\n plt.plot(fededf_x_new, fededf_y_new, 'g--', label='FED-EDF')\n plt.plot(x,fedezl, 'rs', markersize=3)\n plt.plot(fedezl_x_new, fedezl_y_new, 'r--', label='FED-EZL')\n \n \n plt.xlabel('U norm')\n plt.ylabel('Acceptance rate')\n plt.title('Results levels '+str(l)+' tasks '+str(t))\n plt.legend()\n plt.savefig(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)+\"/graph-l\"+str(l)+\"-c\"+str(c)+\"-e\"+str(p)+\"-\"+str(d)+\"-\"+str(t)+\".png\")\n\ndef plot_levels():\n s = 1\n col = 0\n for sched in schedulers:\n i = 0\n lvl = [[] for x in range(len(number_levels))]\n plt.figure() \n \n for l in number_levels:\n x = []\n for c in number_cores:\n for p in edge_percentage:\n for d in number_dags:\n for t in number_tasks:\n with open(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)+\"/out-l\"+str(l)+\"-c-\"+str(c)+\"-e\"+str(p)+\"-\"+str(d)+\"-\"+str(t)+\"-total.csv\", 'r') as csvfile:\n plots = csv.reader(csvfile, delimiter=',')\n for row in plots:\n \n x.append(float(row[0])/c) \n lvl[i].append(float(row[s]))\n # Calculate polynomial approximation\n z = np.polyfit(x, lvl[i], 5)\n f = np.poly1d(z)\n x_new = np.linspace(x[0], x[-1], 50)\n y_new = f(x_new)\n plt.plot(x, lvl[i], colors[col]+'.')\n plt.plot(x_new, y_new, colors[col], label=str(l))\n i = i + 1\n col = col + 1 \n\n plt.xlabel('U norm')\n plt.ylabel('Acceptance rate')\n plt.title(sched+\" Results levels \"+str(l)+\" tasks \"+str(t))\n plt.savefig(sched+\".png\")\n s = s + 4\n col = 0\n\ndef plot_preempt():\n \n for l in number_levels:\n for c in number_cores:\n for p in edge_percentage:\n for d in number_dags:\n for t in number_tasks:\n plt.figure()\n i = 0\n s = 4\n col = 0\n preempt = [[] for x in range(len(schedulers))]\n for sched in schedulers:\n x = []\n\n plt.figure() \n\n with open(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)+\"/out-l\"+str(l)+\"-c-\"+str(c)+\"-e\"+str(p)+\"-\"+str(d)+\"-\"+str(t)+\"-total.csv\", 'r') as csvfile:\n plots = csv.reader(csvfile, delimiter=',')\n for row in plots:\n \n x.append(float(row[0])/c) \n preempt[i].append(float(row[s]))\n # Calculate polynomial approximation\n z = np.polyfit(x, preempt[i], 3)\n f = np.poly1d(z)\n x_new = np.linspace(x[0], x[-1], 50)\n y_new = f(x_new)\n plt.semilogy(y_new, np.log(100*y_new))\n plt.plot(x, preempt[i], colors[col]+'.')\n plt.plot(x_new, y_new, colors[col], label=str(l))\n s = s + 4\n col = col + 1\n i = i + 1\n \n plt.xlabel('U norm')\n plt.ylabel('Preemption/job')\n plt.title(\"Results levels \"+str(l)+\" tasks \"+str(t))\n plt.savefig(str(l)+\".png\")\n\ndef main():\n usage_str = \"%prog [options]\"\n description_str = \"Benchmark script\"\n epilog_str = \"Examples\"\n\n parser = optparse.OptionParser(usage = usage_str,\n description = description_str,\n epilog = epilog_str,\n add_help_option = False,\n version = \"%prog version 0.1\")\n\n parser.add_option(\"-h\", \"--help\", action = \"store_true\", dest = \"help\",\n default = False, help = \"Show this message and exit\")\n\n parser.add_option(\"-s\", \"--setup\", action = \"store_true\", dest = \"setup\",\n default = False, help = \"Setup folder for generation/benchmarks\")\n\n parser.add_option(\"-g\", \"--generate\", action = \"store_true\", dest = \"generate\",\n default = False, help = \"Launch generation\")\n\n parser.add_option('-b', \"--benchmark\", action = \"store_true\", dest = \"benchmark\",\n default = False, help = \"Launch benchmarking\")\n\n parser.add_option(\"-c\", 
\"--clean\", action = \"store_false\", dest = \"cleanup\",\n default = False, help = \"Cleanup generated files\")\n\n (options, args) = parser.parse_args()\n start = time.time()\n\n if options.help:\n print_help(parser)\n return 0\n\n if options.setup:\n create_setup()\n return 0\n\n if options.cleanup:\n clean_generated()\n return 0\n\n if options.generate:\n generate()\n\n if options.benchmark:\n #benchmark()\n plot()\n end = time.time()\n send_email(start,end)\n\n return 0\n\ndef print_help(parser):\n parser.print_help()\n\ndef send_email(t_start,t_end):\n i = 0\n with open(\"config.txt\") as f:\n for line in f:\n if i == 0:\n FROM = line.rstrip('\\n')\n elif i == 1:\n password = line.rstrip('\\n')\n elif i == 2:\n TO = line.rstrip('\\n')\n i += 1\n nb_files = len(number_levels) * len(number_cores) * len(edge_percentage) * len(number_dags) * len(number_tasks) * int(number_files)\n SUBJECT = \"Results for benchmarks ready\"\n TEXT = \"Statistics for the benchmarks:\\n\\\n - Elapsed time: \"+str(t_end - t_start)+\"\\n\\\n - Number of files: \"+str(nb_files)\n\n msg = MIMEMultipart()\n msg['From'] = \"MC-DAG script\"\n msg['To'] = TO\n msg['Subject'] = SUBJECT\n\n msg.attach(MIMEText(TEXT, 'plain'))\n\n # Attach graphs' PNGs\n for sched in schedulers:\n\n attachment = open(sched+\".png\", \"rb\")\n\n #attachment = open(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)+\"/graph-l\"+str(l)+\"-c\"+str(c)+\"-e\"+str(p)+\"-\"+str(d)+\"-\"+str(t)+\".png\", \"rb\")\n part = MIMEBase('application', 'octet-stream')\n part.set_payload((attachment).read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', \"attachment; filename=\"+sched+\".png\")\n\n #part.add_header('Content-Disposition', \"attachment; filename=results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)+\"/graph-l\"+str(l)+\"-c\"+str(c)+\"-e\"+str(p)+\"-\"+str(d)+\"-\"+str(t)+\".png\")\n msg.attach(part)\n \n # Create a zip with results\n zf = tempfile.TemporaryFile(prefix='results', suffix='.zip')\n zip = zipfile.ZipFile(zf, 'w')\n for l in number_levels:\n for c in number_cores:\n for p in edge_percentage:\n for d in number_dags:\n for t in number_tasks:\n zip.write(\"results/l\"+str(l)+\"/c\"+str(c)+\"/e\"+str(p)+\"/\"+str(d)+\"/\"+str(t)+\"/out-l\"+str(l)+\"-c-\"+str(c)+\"-e\"+str(p)+\"-\"+str(d)+\"-\"+str(t)+\"-total.csv\")\n zip.close()\n zf.seek(0)\n part = MIMEBase('application', 'zip')\n part.set_payload(zf.read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', \"attachment; filename=results.zip\")\n msg.attach(part)\n \n\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.ehlo()\n server.starttls()\n server.login(FROM, password)\n text = msg.as_string()\n server.sendmail(FROM, TO, text)\n server.close()\n\nif __name__ == \"__main__\":\n sys.exit(main())\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.polyfit",
"numpy.poly1d",
"numpy.log",
"numpy.linspace",
"numpy.arange",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FanWangEcon/pyecon | [
"42b89bdae5dc871018a175d9dfb8d8f8f4729e50"
] | [
"setup.py"
] | [
"from setuptools import setup, find_packages\nfrom codecs import open\nimport numpy\nimport os\nfrom Cython.Build import cythonize\n\n# thanks Pipy for handling markdown now\nROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(ROOT, 'README.md'), encoding=\"utf-8\") as f:\n README = f.read()\n\n# We can actually import a restricted version of pyecon that\n# does not need the compiled code\nimport pyecon\n\nVERSION = pyecon.__version__\n\nsetup(\n name=\"pyecon\",\n description=\"Dynamic Heterogeneous Agents Equilibrium Models\",\n long_description=README,\n long_description_content_type='text/markdown',\n include_dirs=[numpy.get_include()],\n packages=find_packages(),\n data_files=[(\"\", [\"LICENSE\"])],\n install_requires=['numpy', 'scipy'],\n version=VERSION,\n url=\"http://pyecon.readthedocs.io/\",\n author=\"Fan Wang\",\n author_email=\"[email protected]\"\n)\n"
] | [
[
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
refitt/ref | [
"3ccc398e7b95f77549ab77884b87f40abdd3effb"
] | [
"tests/unit/test_forecast.py"
] | [
"# SPDX-FileCopyrightText: 2019-2021 REFITT Team\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Unit tests for forecast interface.\"\"\"\n\n\n# type annotations\nfrom typing import Dict, Any\n\n# standard libs\nimport io\nimport json\nimport string\nimport random\n\n# external libs\nimport numpy as np\nfrom astropy.time import Time\nfrom hypothesis import given, strategies as st\n\n# internal libs\nfrom refitt.core.schema import SchemaError\nfrom refitt.data.forecast import Forecast\n\n\ndef generate_random_forecast() -> Dict[str, Any]:\n \"\"\"Generate random numbers to satisfy schema.\"\"\"\n return {\n # NOTE: ztf_id fixed because the object needs to exist for integration tests\n 'ztf_id': 'ZTF20actrfli',\n 'instrument': 'ZTF_public',\n 'time_since_trigger': random.randint(1, 20),\n 'current_time': random.uniform(59_260, 60_000),\n 'num_obs': random.randint(3, 20),\n 'filter': random.choice(['g-ztf', 'r-ztf']),\n 'class': [random.choices(string.ascii_uppercase, k=3), random.random()],\n 'phase': 'rising',\n 'next_mag_mean': random.uniform(14, 20),\n 'next_mag_sigma': random.random(),\n 'time_to_peak': list(map(float, np.random.rand(3))),\n 'time_arr': list(map(float, np.random.rand(100))),\n 'mag_mean': list(map(float, np.random.rand(100))),\n 'mag_sigma': list(map(float, np.random.rand(100))),\n 'mdmc': random.random(),\n 'moe': random.random(),\n }\n\n\nFORECAST_KEYS = [\n 'ztf_id', 'instrument', 'time_since_trigger', 'current_time', 'num_obs', 'filter', 'class',\n 'phase', 'next_mag_mean', 'next_mag_sigma', 'time_to_peak', 'time_arr', 'mag_mean',\n 'mag_sigma', 'mdmc', 'moe',\n]\n\n\nclass TestForecast:\n \"\"\"Unit tests against basic forecast interface.\"\"\"\n\n def test_init(self) -> None:\n \"\"\"Check instance creation.\"\"\"\n data = generate_random_forecast()\n forecast = Forecast(data)\n assert forecast.data == data\n\n @given(st.sampled_from(FORECAST_KEYS))\n def test_missing_key(self, key: str) -> None:\n \"\"\"Will raise SchemaError on missing key.\"\"\"\n data = generate_random_forecast()\n data.pop(key)\n try:\n _ = Forecast(data)\n except SchemaError as error:\n assert str(error) == f'Missing key \\'{key}\\''\n else:\n raise AssertionError('Expected SchemaError')\n\n def test_wrong_type_for_value(self) -> None:\n \"\"\"Will raise SchemaError on wrong type for value.\"\"\"\n data = generate_random_forecast()\n data['ztf_id'] = 123\n try:\n _ = Forecast(data)\n except SchemaError as error:\n assert str(error) == 'Expected type str for member \\'ztf_id\\', found int(123) at position 0'\n else:\n raise AssertionError('Expected SchemaError')\n\n def test_init_from_forecast(self) -> None:\n \"\"\"Test passive type coercion.\"\"\"\n data = generate_random_forecast()\n forecast = Forecast(data)\n assert forecast == Forecast(forecast)\n\n def test_from_dict(self) -> None:\n \"\"\"Test forecast initialization from existing dictionary.\"\"\"\n data = generate_random_forecast()\n assert Forecast.from_dict(data).data == data\n\n def test_to_dict(self) -> None:\n \"\"\"Test export to dictionary.\"\"\"\n data = generate_random_forecast()\n assert Forecast.from_dict(data).to_dict() == data\n\n def test_equality(self) -> None:\n \"\"\"Test equality comparison operator.\"\"\"\n data = generate_random_forecast()\n assert Forecast.from_dict(data) == Forecast.from_dict(data)\n assert Forecast.from_dict(data) != Forecast.from_dict(generate_random_forecast())\n\n def test_from_str(self) -> None:\n \"\"\"Test forecast initialization from existing string.\"\"\"\n data = 
generate_random_forecast()\n text = json.dumps(data)\n assert Forecast.from_str(text).data == data\n\n def test_from_io(self) -> None:\n \"\"\"Test forecast initialization from existing file descriptor.\"\"\"\n data = generate_random_forecast()\n text = json.dumps(data)\n stream = io.StringIO(text)\n assert Forecast.from_io(stream).data == data\n\n def test_from_local(self, tmpdir: str) -> None:\n \"\"\"Test forecast initialization from local file.\"\"\"\n data = generate_random_forecast()\n with open(f'{tmpdir}/forecast.json', mode='w') as stream:\n json.dump(data, stream)\n assert Forecast.from_local(f'{tmpdir}/forecast.json').data == data\n\n def test_to_local(self, tmpdir: str) -> None:\n \"\"\"Test forecast export to local file.\"\"\"\n data = generate_random_forecast()\n Forecast.from_dict(data).to_local(f'{tmpdir}/forecast.json')\n with open(f'{tmpdir}/forecast.json', mode='r') as stream:\n assert json.load(stream) == data\n\n def test_attributes(self) -> None:\n \"\"\"Check attribute access.\"\"\"\n data = generate_random_forecast()\n forecast = Forecast.from_dict(data)\n for field, value in data.items():\n assert getattr(forecast, field) == value\n\n def test_time(self) -> None:\n \"\"\"Check time property.\"\"\"\n data = generate_random_forecast()\n forecast = Forecast.from_dict(data)\n assert forecast.time == Time(forecast.current_time + 1, format='mjd', scale='utc').datetime\n"
] | [
[
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Weizhuo-Zhang/car_detection_recognition | [
"7413992a5a7319f838b6b38597c1caee8dfdb641"
] | [
"server/yolo3/utils.py"
] | [
"\"\"\"Miscellaneous utility functions.\"\"\"\r\n\r\nfrom functools import reduce\r\n\r\nfrom PIL import Image,ImageFile\r\nImageFile.LOAD_TRUNCATED_IMAGES = True\r\nimport numpy as np\r\nfrom matplotlib.colors import rgb_to_hsv, hsv_to_rgb\r\n\r\ndef compose(*funcs):\r\n \"\"\"Compose arbitrarily many functions, evaluated left to right.\r\n\r\n Reference: https://mathieularose.com/function-composition-in-python/\r\n \"\"\"\r\n # return lambda x: reduce(lambda v, f: f(v), funcs, x)\r\n if funcs:\r\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\r\n else:\r\n raise ValueError('Composition of empty sequence not supported.')\r\n\r\ndef letterbox_image(image, size):\r\n '''resize image with unchanged aspect ratio using padding'''\r\n iw, ih = image.size\r\n w, h = size\r\n scale = min(w/iw, h/ih)\r\n nw = int(iw*scale)\r\n nh = int(ih*scale)\r\n\r\n image = image.resize((nw,nh), Image.BICUBIC)\r\n new_image = Image.new('RGB', size, (128,128,128))\r\n new_image.paste(image, ((w-nw)//2, (h-nh)//2))\r\n return new_image\r\n\r\ndef rand(a=0, b=1):\r\n return np.random.rand()*(b-a) + a\r\n\r\ndef get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):\r\n '''random preprocessing for real-time data augmentation'''\r\n line = annotation_line.split()\r\n image = Image.open(line[0])\r\n iw, ih = image.size\r\n h, w = input_shape\r\n box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])\r\n\r\n if not random:\r\n # resize image\r\n scale = min(w/iw, h/ih)\r\n nw = int(iw*scale)\r\n nh = int(ih*scale)\r\n dx = (w-nw)//2\r\n dy = (h-nh)//2\r\n image_data=0\r\n if proc_img:\r\n image = image.resize((nw,nh), Image.BICUBIC)\r\n new_image = Image.new('RGB', (w,h), (128,128,128))\r\n new_image.paste(image, (dx, dy))\r\n image_data = np.array(new_image)/255.\r\n\r\n # correct boxes\r\n box_data = np.zeros((max_boxes,5))\r\n if len(box)>0:\r\n np.random.shuffle(box)\r\n if len(box)>max_boxes: box = box[:max_boxes]\r\n box[:, [0,2]] = box[:, [0,2]]*scale + dx\r\n box[:, [1,3]] = box[:, [1,3]]*scale + dy\r\n box_data[:len(box)] = box\r\n\r\n return image_data, box_data\r\n\r\n # resize image\r\n new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)\r\n scale = rand(.25, 2)\r\n if new_ar < 1:\r\n nh = int(scale*h)\r\n nw = int(nh*new_ar)\r\n else:\r\n nw = int(scale*w)\r\n nh = int(nw/new_ar)\r\n image = image.resize((nw,nh), Image.BICUBIC)\r\n\r\n # place image\r\n dx = int(rand(0, w-nw))\r\n dy = int(rand(0, h-nh))\r\n new_image = Image.new('RGB', (w,h), (128,128,128))\r\n new_image.paste(image, (dx, dy))\r\n image = new_image\r\n\r\n # flip image or not\r\n flip = rand()<.5\r\n if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)\r\n\r\n # distort image\r\n hue = rand(-hue, hue)\r\n sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)\r\n val = rand(1, val) if rand()<.5 else 1/rand(1, val)\r\n x = rgb_to_hsv(np.array(image)/255.)\r\n x[..., 0] += hue\r\n x[..., 0][x[..., 0]>1] -= 1\r\n x[..., 0][x[..., 0]<0] += 1\r\n x[..., 1] *= sat\r\n x[..., 2] *= val\r\n x[x>1] = 1\r\n x[x<0] = 0\r\n image_data = hsv_to_rgb(x) # numpy array, 0 to 1\r\n\r\n # correct boxes\r\n box_data = np.zeros((max_boxes,5))\r\n if len(box)>0:\r\n np.random.shuffle(box)\r\n box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx\r\n box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy\r\n if flip: box[:, [0,2]] = w - box[:, [2,0]]\r\n box[:, 0:2][box[:, 0:2]<0] = 0\r\n box[:, 2][box[:, 2]>w] = w\r\n box[:, 3][box[:, 3]>h] = h\r\n box_w = 
box[:, 2] - box[:, 0]\r\n box_h = box[:, 3] - box[:, 1]\r\n box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box\r\n if len(box)>max_boxes: box = box[:max_boxes]\r\n box_data[:len(box)] = box\r\n\r\n return image_data, box_data\r\n"
] | [
[
"numpy.logical_and",
"matplotlib.colors.hsv_to_rgb",
"numpy.random.shuffle",
"numpy.random.rand",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ltindall/BirdsEye | [
"8a2dd1727097dc8308fbc292c619689b188ab62d"
] | [
"birdseye/utils.py"
] | [
"\"\"\"\nParticle Filter helper functions\n\"\"\"\nimport configparser\nimport json\nimport math\nimport os\nfrom collections import defaultdict\nfrom io import BytesIO\nfrom itertools import permutations\nfrom itertools import product\nfrom pathlib import Path\n\nimport imageio\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom PIL import Image\nfrom scipy.ndimage.filters import gaussian_filter\n\nfrom .definitions import RUN_DIR\n\n\ndef permute_particle(particle):\n return np.hstack((particle[4:], particle[:4]))\n\n\ndef particle_swap(env):\n # 2000 x 8\n particles = np.copy(env.pf.particles)\n n_targets = env.state.n_targets\n state_dim = 4\n\n # convert particles to cartesian\n for i in range(n_targets):\n x, y = pol2cart(particles[:, state_dim*i],\n np.radians(particles[:, (state_dim*i)+1]))\n particles[:, state_dim*i] = x\n particles[:, (state_dim*i)+1] = y\n\n swapped = True\n k = 0\n while swapped and k < 10:\n k += 1\n swapped = False\n for i in range(len(particles)):\n original_particle = np.copy(particles[i])\n target_centroids = [\n np.mean(particles[:, state_dim*t:(state_dim*t)+2]) for t in range(n_targets)]\n distance = 0\n for t in range(n_targets):\n dif = particles[i, state_dim *\n t:(state_dim*t)+2] - target_centroids[t]\n distance += np.dot(dif, dif)\n\n permuted_particle = permute_particle(particles[i])\n particles[i] = permuted_particle\n permuted_target_centroids = [\n np.mean(particles[:, state_dim*t:(state_dim*t)+2]) for t in range(n_targets)]\n permuted_distance = 0\n for t in range(n_targets):\n dif = particles[i, state_dim *\n t:(state_dim*t)+2] - permuted_target_centroids[t]\n permuted_distance += np.dot(dif, dif)\n\n if distance < permuted_distance:\n particles[i] = original_particle\n else:\n swapped = True\n\n # convert particles to polar\n for i in range(n_targets):\n rho, phi = cart2pol(\n particles[:, state_dim*i], particles[:, (state_dim*i)+1])\n particles[:, state_dim*i] = rho\n particles[:, (state_dim*i)+1] = np.degrees(phi)\n\n env.pf.particles = particles\n\n\ndef pol2cart(rho, phi):\n \"\"\"\n Transform polar to cartesian\n \"\"\"\n x = rho * np.cos(phi)\n y = rho * np.sin(phi)\n return(x, y)\n\n\ndef cart2pol(x, y):\n \"\"\"\n Transform cartesian to polar\n \"\"\"\n rho = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return rho, phi\n\n\ndef get_distance(coord1, coord2):\n \"\"\"\n Get the distance between two coordinates\n \"\"\"\n if (coord1 is None) or (coord2 is None):\n return None\n\n lat1, long1 = coord1\n lat2, long2 = coord2\n # approximate radius of earth in km\n R = 6373.0\n\n lat1 = np.radians(lat1)\n long1 = np.radians(long1)\n\n lat2 = np.radians(lat2)\n long2 = np.radians(long2)\n\n dlon = long2 - long1\n dlat = lat2 - lat1\n\n a = np.sin(dlat / 2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2)**2\n c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))\n\n distance = R * c\n return distance*(1e3)\n\n\ndef get_bearing(coord1, coord2):\n \"\"\"\n Get the bearing of two coordinates\n \"\"\"\n if (coord1 is None) or (coord2 is None):\n return None\n\n lat1, long1 = coord1\n lat2, long2 = coord2\n dLon = (long2 - long1)\n x = np.cos(np.radians(lat2)) * np.sin(np.radians(dLon))\n y = np.cos(np.radians(lat1)) * np.sin(np.radians(lat2)) - \\\n np.sin(np.radians(lat1)) * np.cos(np.radians(lat2)) * \\\n np.cos(np.radians(dLon))\n brng = np.arctan2(x, y)\n brng = np.degrees(brng)\n\n return -brng + 90\n\n\ndef is_float(element):\n \"\"\"\n Check if an element is a float or not\n \"\"\"\n try:\n 
float(element)\n return True\n except (ValueError, TypeError):\n return False\n\n\nclass GPSVis:\n \"\"\"\n modified from:\n https://github.com/tisljaricleo/GPS-visualization-Python\n MIT License\n Copyright (c) 2021 Leo Tišljarić\n\n Class for GPS data visualization using pre-downloaded OSM map in image format.\n \"\"\"\n\n def __init__(self, position=None, map_path=None, bounds=None):\n \"\"\"\n :param data_path: Path to file containing GPS records.\n :param map_path: Path to pre-downloaded OSM map in image format.\n :param bounds: Upper-left, and lower-right GPS points of the map (lat1, lon1, lat2, lon2).\n \"\"\"\n self.position = position\n self.map_path = map_path\n self.bounds = bounds\n\n if self.map_path is not None and self.bounds is not None:\n self.img = self.create_image_from_map()\n elif self.position is not None:\n self.zoom = 17\n self.TILE_SIZE = 256\n distance = 100\n\n coord = self.position\n\n lat_dist = distance/111111\n lon_dist = distance / (111111 * np.cos(np.radians(coord[0])))\n top, bot = coord[0] + lat_dist, coord[0] - lat_dist\n lef, rgt = coord[1] - lon_dist, coord[1] + lon_dist\n self.bounds = [top, lef, bot, rgt]\n\n self.img = self.create_image_from_position()\n self.get_ticks()\n self.cell_size = 1\n self.xedges = np.arange(0, self.width_meters +\n self.cell_size, self.cell_size)\n self.yedges = np.arange(0, self.height_meters +\n self.cell_size, self.cell_size)\n\n def plot_map(self, axis1=None, output=None, save_as='resultMap.png'):\n \"\"\"\n Method for plotting the map. You can choose to save it in file or to plot it.\n :param output: Type 'plot' to show the map or 'save' to save it. Default None\n :param save_as: Name and type of the resulting image.\n :return:\n \"\"\"\n # create Fig and Axis if doesn't exist\n if axis1 is None:\n fig, axis1 = plt.subplots(figsize=(10, 13))\n\n # Plot background map\n axis1.imshow(np.flipud(self.img), alpha=0.7, origin='lower')\n\n # Set axis dimensions, labels and tick marks\n axis1.set_xlim(0, int(self.width_meters))\n axis1.set_ylim(0, int(self.height_meters))\n axis1.set_xlabel('Longitude')\n axis1.set_ylabel('Latitude')\n axis1.set_xticks(np.linspace(0, int(self.width_meters), num=8))\n axis1.set_xticklabels(self.x_ticks, rotation=30, ha='center')\n axis1.set_yticks(np.linspace(0, int(self.height_meters), num=8))\n axis1.set_yticklabels(self.y_ticks)\n axis1.grid()\n\n # Save or display\n if output == 'save':\n plt.savefig(save_as)\n elif output == 'plot':\n plt.show()\n\n def point_to_pixels(self, lat, lon, zoom):\n \"\"\"convert gps coordinates to web mercator\"\"\"\n r = math.pow(2, zoom) * self.TILE_SIZE\n lat = math.radians(lat)\n\n x = int((lon + 180.0) / 360.0 * r)\n y = int(\n (1.0 - math.log(math.tan(lat) + (1.0 / math.cos(lat))) / math.pi) / 2.0 * r)\n\n return x, y\n\n def create_image_from_position(self):\n URL = 'https://tile.openstreetmap.org/{z}/{x}/{y}.png'.format\n\n top, lef, bot, rgt = self.bounds\n\n x0, y0 = self.point_to_pixels(top, lef, self.zoom)\n x1, y1 = self.point_to_pixels(bot, rgt, self.zoom)\n\n x0_tile, y0_tile = int(x0 / self.TILE_SIZE), int(y0 / self.TILE_SIZE)\n x1_tile, y1_tile = math.ceil(\n x1 / self.TILE_SIZE), math.ceil(y1 / self.TILE_SIZE)\n\n assert (x1_tile - x0_tile) * (y1_tile -\n y0_tile) < 50, \"That's too many tiles!\"\n\n # full size image we'll add tiles to\n img = Image.new('RGB', (\n (x1_tile - x0_tile) * self.TILE_SIZE,\n (y1_tile - y0_tile) * self.TILE_SIZE))\n\n # loop through every tile inside our bounded box\n for x_tile, y_tile in product(range(x0_tile, 
x1_tile), range(y0_tile, y1_tile)):\n with requests.get(URL(x=x_tile, y=y_tile, z=self.zoom)) as resp:\n tile_img = Image.open(BytesIO(resp.content))\n # add each tile to the full size image\n img.paste(\n im=tile_img,\n box=((x_tile - x0_tile) * self.TILE_SIZE, (y_tile - y0_tile) * self.TILE_SIZE))\n\n x, y = x0_tile * self.TILE_SIZE, y0_tile * self.TILE_SIZE\n\n img = img.crop((\n int(x0-x), # left\n int(y0-y), # top\n int(x1-x), # right\n int(y1-y))) # bottom\n\n self.width_meters = get_distance(\n (self.bounds[0], self.bounds[1]), (self.bounds[0], self.bounds[3]))\n self.height_meters = get_distance(\n (self.bounds[0], self.bounds[1]), (self.bounds[2], self.bounds[1]))\n img = img.resize((int(self.width_meters), int(self.height_meters)))\n\n return img\n\n def create_image_from_map(self):\n \"\"\"\n Create the image that contains the original map and the GPS records.\n :param color: Color of the GPS records.\n :param width: Width of the drawn GPS records.\n :return:\n \"\"\"\n\n img = Image.open(self.map_path, 'r')\n self.width_meters = get_distance(\n (self.bounds[0], self.bounds[1]), (self.bounds[0], self.bounds[3]))\n self.height_meters = get_distance(\n (self.bounds[0], self.bounds[1]), (self.bounds[2], self.bounds[1]))\n img = img.resize((int(self.width_meters), int(self.height_meters)))\n print('background image size (pixels) = ', img.size)\n\n return img\n\n def scale_to_img(self, lat_lon, w_h):\n \"\"\"\n Conversion from latitude and longitude to the image pixels.\n It is used for drawing the GPS records on the map image.\n :param lat_lon: GPS record to draw (lat1, lon1).\n :param w_h: Size of the map image (w, h).\n :return: Tuple containing x and y coordinates to draw on map image.\n \"\"\"\n # https://gamedev.stackexchange.com/questions/33441/how-to-convert-a-number-from-one-min-max-set-to-another-min-max-set/33445\n lat_old = (self.bounds[2], self.bounds[0])\n new = (0, w_h[1])\n y = ((lat_lon[0] - lat_old[0]) * (new[1] - new[0]) /\n (lat_old[1] - lat_old[0])) + new[0]\n lon_old = (self.bounds[1], self.bounds[3])\n new = (0, w_h[0])\n x = ((lat_lon[1] - lon_old[0]) * (new[1] - new[0]) /\n (lon_old[1] - lon_old[0])) + new[0]\n # y must be reversed because the orientation of the image in the matplotlib.\n # image - (0, 0) in upper left corner; coordinate system - (0, 0) in lower left corner\n return int(x), int(y) # w_h[1] - int(y)\n\n def set_origin(self, lat_lon):\n\n self.origin = self.scale_to_img(\n lat_lon, (int(self.width_meters), int(self.height_meters)))\n\n def get_ticks(self):\n \"\"\"\n Generates custom ticks based on the GPS coordinates of the map for the matplotlib output.\n :return:\n \"\"\"\n self.x_ticks = map(\n lambda x: round(x, 4),\n np.linspace(self.bounds[1], self.bounds[3], num=8))\n self.y_ticks = map(\n lambda x: round(x, 4),\n np.linspace(self.bounds[2], self.bounds[0], num=8))\n # Ticks must be reversed because the orientation of the image in the matplotlib.\n # image - (0, 0) in upper left corner; coordinate system - (0, 0) in lower left corner\n self.y_ticks = list(self.y_ticks) # sorted(y_ticks, reverse=True)\n self.x_ticks = list(self.x_ticks)\n\n\nclass Results:\n '''\n Results class for saving run results\n to file with common format.\n '''\n\n def __init__(self, method_name='', global_start_time='', num_iters=0, plotting=False, config={}):\n self.num_iters = num_iters\n self.method_name = method_name\n self.global_start_time = global_start_time\n self.plotting = plotting\n if not isinstance(self.plotting, bool):\n if self.plotting in 
('true', 'True'):\n self.plotting = True\n else:\n self.plotting = False\n self.native_plot = config.get('native_plot', 'false').lower()\n self.plot_every_n = int(config.get('plot_every_n', 1))\n self.make_gif = config.get('make_gif', 'false').lower()\n\n self.namefile = f'{RUN_DIR}/{method_name}/{global_start_time}_data.csv'\n self.plot_dir = config.get(\n 'plot_dir', f'{RUN_DIR}/{method_name}/{global_start_time}')\n self.logdir = f'{RUN_DIR}/{method_name}/{global_start_time}_logs/'\n if self.make_gif == 'true':\n Path(self.plot_dir+'/png/').mkdir(parents=True, exist_ok=True)\n Path(self.plot_dir+'/gif/').mkdir(parents=True, exist_ok=True)\n Path(self.logdir).mkdir(parents=True, exist_ok=True)\n self.col_names = ['time', 'run_time', 'target_state', 'sensor_state',\n 'action', 'observation', 'reward', 'collisions', 'lost',\n 'r_err', 'theta_err', 'heading_err', 'centroid_err', 'rmse', 'mae', 'inference_times', 'pf_cov']\n\n self.pf_stats = defaultdict(list)\n self.abs_target_hist = []\n self.abs_sensor_hist = []\n self.target_hist = []\n self.sensor_hist = []\n self.sensor_gps_hist = []\n self.history_length = 50\n self.time_step = 0\n self.texts = []\n self.openstreetmap = None\n self.transform = None\n self.expected_target_rssi = None\n\n if config:\n write_header_log(config, self.method_name, self.global_start_time)\n\n def write_dataframe(self, run_data):\n \"\"\"\n Save dataframe to CSV file\n \"\"\"\n if os.path.isfile(self.namefile):\n print('Updating file {}'.format(self.namefile))\n else:\n print('Saving file to {}'.format(self.namefile))\n df = pd.DataFrame(run_data, columns=self.col_names)\n df.to_csv(self.namefile)\n\n def save_gif(self, run, sub_run=None):\n filename = run if sub_run is None else '{}_{}'.format(run, sub_run)\n # Build GIF\n with imageio.get_writer('{}/gif/{}.gif'.format(self.plot_dir, filename), mode='I', fps=5) as writer:\n for png_filename in sorted(os.listdir(self.plot_dir+'/png/'), key=lambda x: (len(x), x)):\n image = imageio.imread(self.plot_dir+'/png/'+png_filename)\n writer.append_data(image)\n\n def live_plot(self, env, time_step=None, fig=None, ax=None, data=None):\n \"\"\"\n Create a live plot\n \"\"\"\n if self.openstreetmap is None and data.get('position', None) is not None and data.get('bearing', None) is not None:\n self.openstreetmap = GPSVis(\n position=data['position']\n # map_path='map_delta_park.png', # Path to map downloaded from the OSM.\n # bounds=(45.60311,-122.68450, 45.59494, -122.67505) # upper left, lower right\n )\n self.openstreetmap.set_origin(data['position'])\n self.transform = np.array(\n [self.openstreetmap.origin[0], self.openstreetmap.origin[1]])\n\n self.time_step = time_step\n self.pf_stats['mean_hypothesis'].append(\n env.pf.mean_hypothesis if hasattr(env.pf, 'mean_hypothesis') else [None])\n self.pf_stats['map_hypothesis'].append(\n env.pf.map_hypothesis if hasattr(env.pf, 'map_hypothesis') else [None])\n self.pf_stats['mean_state'].append(\n env.pf.mean_state if hasattr(env.pf, 'mean_state') else [None])\n self.pf_stats['map_state'].append(\n env.pf.map_state if hasattr(env.pf, 'map_state') else [None])\n\n abs_sensor = env.state.sensor_state\n abs_particles = env.get_absolute_particles()\n self.sensor_hist.append(abs_sensor)\n\n target_bearing = None\n target_relative_bearing = None\n\n if data.get('position', None) is not None and data.get('drone_position', None) is not None and data.get('bearing', None) is not None:\n target_bearing = get_bearing(\n data['position'], data['drone_position'])\n target_relative_bearing 
= target_bearing - data['bearing']\n target_distance = get_distance(\n data['position'], data['drone_position'])\n self.expected_target_rssi = env.sensor.observation(\n [[target_distance, target_relative_bearing, None, None]])[0]\n\n ax.clear()\n if self.openstreetmap is not None:\n self.openstreetmap.plot_map(axis1=ax)\n # TODO get variables\n ax.set_title('Time = {}, Frequency = {}, Bandwidth = {}, Gain = {}'.format(\n time_step, None, None, None))\n\n color_array = [['salmon', 'darkred', 'red'],\n ['lightskyblue', 'darkblue', 'blue']]\n lines = [] # https://matplotlib.org/3.5.0/api/_as_gen/matplotlib.pyplot.legend.html\n\n # Plot Particles\n for t in range(env.state.n_targets):\n particles_x, particles_y = pol2cart(\n abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))\n if self.transform is not None:\n particles_x += self.transform[0]\n particles_y += self.transform[1]\n line1, = ax.plot(particles_x, particles_y, 'o',\n color=color_array[t][0], markersize=4, markeredgecolor='black', label='particles', alpha=0.3, zorder=1)\n\n if self.openstreetmap:\n heatmap, xedges, yedges = np.histogram2d(particles_x, particles_y, bins=(\n self.openstreetmap.xedges, self.openstreetmap.yedges))\n heatmap = gaussian_filter(heatmap, sigma=8)\n extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]\n im = ax.imshow(heatmap.T, extent=extent, origin='lower',\n cmap='jet', interpolation='nearest', alpha=0.2)\n # plt.colorbar(im)\n\n centroid_x = np.mean(particles_x)\n centroid_y = np.mean(particles_y)\n line2, = ax.plot(centroid_x, centroid_y, '*', color='magenta',\n markeredgecolor='black', label='centroid', markersize=12, zorder=2)\n\n if t == 0:\n lines.extend([line1, line2])\n else:\n lines.extend([])\n\n # Plot Sensor\n sensor_x, sensor_y = pol2cart(np.array(self.sensor_hist)[\n :, 0], np.radians(np.array(self.sensor_hist)[:, 1]))\n if self.transform is not None:\n sensor_x += self.transform[0]\n sensor_y += self.transform[1]\n if len(self.sensor_hist) > 1:\n ax.arrow(sensor_x[-2], sensor_y[-2], 4*(sensor_x[-1]-sensor_x[-2]),\n 4*(sensor_y[-1]-sensor_y[-2]), width=1.5, color='blue', zorder=4)\n ax.plot(sensor_x[:-1], sensor_y[:-1], linewidth=3.0,\n color='blue', markeredgecolor='black', markersize=4, zorder=4)\n line4, = ax.plot(sensor_x[-1], sensor_y[-1], 'H',\n color='blue', label='sensor', markersize=10, zorder=4)\n lines.extend([line4])\n\n if self.openstreetmap and data.get('drone_position', None) is not None:\n self.target_hist.append(self.openstreetmap.scale_to_img(\n data['drone_position'], (self.openstreetmap.width_meters, self.openstreetmap.height_meters)))\n target_np = np.array(self.target_hist)\n if len(self.target_hist) > 1:\n ax.plot(target_np[:, 0], target_np[:, 1], linewidth=3.0,\n color='maroon', zorder=3, markersize=4)\n line5, = ax.plot(target_np[-1, 0], target_np[-1, 1], 'o', color='maroon',\n markeredgecolor='black', label='target', markersize=10, zorder=3)\n lines.extend([line5])\n\n # Legend\n ax.legend(handles=lines, loc='upper left', bbox_to_anchor=(\n 1.04, 1.0), fancybox=True, shadow=True, ncol=1)\n\n # X/Y Limits\n if self.openstreetmap is None:\n map_width = 600\n min_map = -1*int(map_width/2)\n max_map = int(map_width/2)\n ax.set_xlim(min_map, max_map)\n ax.set_ylim(min_map, max_map)\n\n # Sidebar Text\n # actual_str = r'$\\bf{Actual}$''\\n' # prettier format but adds ~0.04 seconds ???\n actual_str = 'Actual\\n'\n actual_str += 'Bearing = {:.0f} deg\\n'.format(data.get(\n 'bearing', None)) if data.get('bearing', None) else 'Bearing = unknown\\n'\n actual_str += 
'Speed = {:.2f} m/s'.format(data.get('action_taken', None)[\n 1]) if data.get('action_taken', None) else 'Speed = unknown\\n'\n\n proposal_str = 'Proposed\\n'\n proposal_str += 'Bearing = {:.0f} deg\\n'.format(data.get('action_proposal', None)[\n 0]) if None not in data.get('action_proposal', (None, None)) else 'Bearing = unknown\\n'\n proposal_str += 'Speed = {:.2f} m/s'.format(data.get('action_proposal', None)[\n 1]) if None not in data.get('action_proposal', (None, None)) else 'Speed = unknown\\n'\n\n last_mean_hyp = self.pf_stats['mean_hypothesis'][-1][0]\n last_map_hyp = self.pf_stats['map_hypothesis'][-1][0]\n\n rssi_str = 'RSSI\\n'\n rssi_str += 'Observed = {:.1f} dB\\n'.format(\n env.last_observation) if env.last_observation else 'Observed = unknown\\n'\n rssi_str += 'Expected = {:.1f} dB\\n'.format(\n self.expected_target_rssi) if self.expected_target_rssi else 'Expected = unknown\\n'\n rssi_str += 'Difference = {:.1f} dB\\n'.format(env.last_observation - self.expected_target_rssi) if (\n env.last_observation and self.expected_target_rssi) else ''\n #rssi_str += 'Target bearing = {} \\n'.format(target_bearing) if target_bearing else ''\n #rssi_str += 'Target relative bearing = {} \\n'.format(target_relative_bearing) if target_relative_bearing else ''\n rssi_str += 'MLE estimate = {:.1f} dB\\n'.format(\n last_mean_hyp) if last_mean_hyp else 'MLE estimate = unknown'\n rssi_str += 'MAP estimate = {:.1f} dB'.format(\n last_map_hyp) if last_map_hyp else 'MAP estimate = unknown'\n\n if len(fig.texts) == 0:\n props = dict(boxstyle='round', facecolor='palegreen', alpha=0.5)\n text = fig.text(1.04, 0.75, actual_str, transform=ax.transAxes,\n fontsize=14, verticalalignment='top', bbox=props)\n props = dict(boxstyle='round',\n facecolor='paleturquoise', alpha=0.5)\n text = fig.text(1.04, 0.5, proposal_str, transform=ax.transAxes,\n fontsize=14, verticalalignment='top', bbox=props)\n props = dict(boxstyle='round', facecolor='khaki', alpha=0.5)\n text = fig.text(1.04, 0.25, rssi_str, transform=ax.transAxes,\n fontsize=14, verticalalignment='top', bbox=props)\n else:\n fig.texts[0].set_text(actual_str)\n fig.texts[1].set_text(proposal_str)\n fig.texts[2].set_text(rssi_str)\n\n self.native_plot = 'true' if time_step % self.plot_every_n == 0 else 'false'\n if self.native_plot == 'true':\n plt.draw()\n plt.pause(0.001)\n if self.make_gif == 'true':\n png_filename = '{}/png/{}.png'.format(self.plot_dir, time_step)\n print('saving plots in {}'.format(png_filename))\n plt.savefig(png_filename, bbox_inches='tight')\n\n def build_multitarget_plots(self, env, time_step=None, fig=None, axs=None, centroid_distance_error=None, selected_plots=[1, 2, 3, 4, 5], simulated=True, textstr=None):\n xp = env.state.target_state\n belief = env.pf.particles.reshape(\n len(env.pf.particles), env.state.n_targets, 4)\n #print('sensor state = ',env.state.sensor_state)\n abs_sensor = env.state.sensor_state\n\n abs_particles = env.get_absolute_particles()\n\n if simulated:\n abs_target = np.array(env.get_absolute_target())\n else:\n abs_target = None\n\n # these are matplotlib.patch.Patch properties\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n\n if len(self.abs_target_hist) < self.history_length:\n self.abs_target_hist = [abs_target] * self.history_length\n self.abs_sensor_hist = [abs_sensor] * self.history_length\n else:\n self.abs_target_hist.pop(0)\n self.abs_target_hist.append(abs_target)\n self.abs_sensor_hist.pop(0)\n self.abs_sensor_hist.append(abs_sensor)\n\n if len(self.target_hist) == 150:\n 
self.target_hist = []\n self.sensor_hist = []\n self.rel_sensor_hist = []\n\n self.target_hist.append(abs_target)\n self.sensor_hist.append(abs_sensor)\n\n plt.tight_layout()\n # Put space between plots\n plt.subplots_adjust(wspace=0.7, hspace=0.2)\n\n color_array = [['salmon', 'darkred', 'red'],\n ['lightskyblue', 'darkblue', 'blue']]\n\n plot_count = 0\n if axs is None:\n axs = {}\n\n map_width = 600\n min_map = -1*int(map_width/2)\n max_map = int(map_width/2)\n cell_size = int((max_map - min_map)/max_map)\n cell_size = 2\n xedges = np.arange(min_map, max_map+cell_size, cell_size)\n yedges = np.arange(min_map, max_map+cell_size, cell_size)\n\n if 1 in selected_plots:\n # Plot 1: Particle Plot (Polar)\n plot_count += 1\n if 1 not in axs:\n axs[1] = fig.add_subplot(\n 1, len(selected_plots), plot_count, polar=True)\n ax = axs[1]\n ax.clear()\n\n for t in range(env.state.n_targets):\n # plot particles\n plot_theta = np.radians(belief[:, t, 1])\n plot_r = belief[:, t, 0] # [row[0] for row in belief]\n\n ax.plot(plot_theta, plot_r, 'o', color=color_array[t][0], markersize=4,\n markeredgecolor='black', label='particles', alpha=0.3, zorder=1)\n\n # plot targets\n plot_x_theta = np.radians(xp[t, 1])\n plot_x_r = xp[t, 0]\n\n ax.set_ylim(0, 300)\n if 2 in selected_plots:\n # Plot 2: Particle Plot (Polar) with Interpolation\n plot_count += 1\n if 2 not in axs:\n axs[2] = fig.add_subplot(\n 1, len(selected_plots), plot_count, polar=True)\n ax = axs[2]\n\n for t in range(env.state.n_targets):\n # Create grid values first via histogram.\n nbins = 10\n plot_theta = np.radians(belief[:, t, 1])\n plot_r = belief[:, t, 0] # [row[0] for row in belief]\n counts, xbins, ybins = np.histogram2d(\n plot_theta, plot_r, bins=nbins)\n # Make a meshgrid for theta, r values\n tm, rm = np.meshgrid(xbins[:-1], ybins[:-1])\n # Build contour plot\n ax.contourf(tm, rm, counts)\n # True position\n plot_x_theta = np.radians(xp[t, 1])\n plot_x_r = xp[t, 0]\n ax.plot(plot_x_theta, plot_x_r, 'X')\n\n ax.set_ylim(0, 300)\n if 3 in selected_plots:\n # Plot 3: Heatmap Plot (Cartesian)\n plot_count += 1\n if 3 not in axs:\n axs[3] = fig.add_subplot(1, len(selected_plots), plot_count)\n ax = axs[3]\n\n # COMBINED; UNCOMMENT AFTER PAPER PLOT\n all_particles_x, all_particles_y = [], []\n\n for t in range(env.state.n_targets):\n cart = np.array(\n list(map(pol2cart, belief[:, t, 0], np.radians(belief[:, t, 1]))))\n x = cart[:, 0]\n y = cart[:, 1]\n all_particles_x.extend(x)\n all_particles_y.extend(y)\n\n heatmap, xedges, yedges = np.histogram2d(\n all_particles_x, all_particles_y, bins=(xedges, yedges))\n heatmap = gaussian_filter(heatmap, sigma=8)\n extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]\n im = ax.imshow(heatmap.T, extent=extent, origin='lower',\n cmap='jet', interpolation='nearest')\n plt.colorbar(im)\n ax.set_xlim(min_map, max_map)\n ax.set_ylim(min_map, max_map)\n if 4 in selected_plots:\n # Plot 4: Absolute Polar coordinates\n plot_count += 1\n if 4 not in axs:\n axs[4] = fig.add_subplot(\n 1, len(selected_plots), plot_count, polar=True)\n ax = axs[4]\n ax.clear()\n\n lines = [] # https://matplotlib.org/3.5.0/api/_as_gen/matplotlib.pyplot.legend.html\n for t in range(env.state.n_targets):\n particles_x, particles_y = pol2cart(\n abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))\n centroid_x = np.mean(particles_x)\n centroid_y = np.mean(particles_y)\n centroid_r, centroid_theta = cart2pol(centroid_x, centroid_y)\n target_r, target_theta, target_x, target_y = [], [], [], []\n\n for i in range(5):\n 
target_r.append(\n self.abs_target_hist[10*(i+1)-1][env.state.n_targets-1-t][0])\n target_theta.append(np.radians(\n self.abs_target_hist[10*(i+1)-1][env.state.n_targets-1-t][1]))\n target_x, target_y = pol2cart(target_r, target_theta)\n if len(self.target_hist) > 1:\n ax.plot(np.radians(np.array(self.target_hist)[:-1, t, 1]), np.array(self.target_hist)[\n :-1, t, 0], linewidth=4.0, color='limegreen', zorder=3, markersize=12)\n\n line0, = ax.plot(target_theta[4], target_r[4], 'X', color='limegreen',\n markeredgecolor='black', label='targets', markersize=20, zorder=4)\n\n line1, = ax.plot(np.radians(abs_particles[:, t, 1]), abs_particles[:, t, 0], 'o', color=color_array[t]\n [0], markersize=4, markeredgecolor='black', label='particles', alpha=0.3, zorder=1)\n if t == 0:\n lines.extend([line0, line1])\n else:\n lines.extend([line0])\n\n if len(self.sensor_hist) > 1:\n ax.plot(np.radians(np.array(self.sensor_hist)[:-1, 1]), np.array(self.sensor_hist)[\n :-1, 0], linewidth=4.0, color='mediumorchid', zorder=3, markersize=12)\n\n line4, = ax.plot(np.radians(self.sensor_hist[-1][1]), self.sensor_hist[-1][0], 'H',\n color='mediumorchid', markeredgecolor='black', label='sensor', markersize=20, zorder=3)\n lines.extend([line4])\n ax.legend(handles=lines, loc='center left', bbox_to_anchor=(\n 1.08, 0.5), fancybox=True, shadow=True,)\n ax.set_ylim(0, 250)\n if 5 in selected_plots:\n # Plot 5: Absolute Cartesian coordinates\n plot_count += 1\n if 5 not in axs:\n axs[5] = fig.add_subplot(1, len(selected_plots), plot_count)\n ax = axs[5]\n\n xedges = np.arange(min_map, max_map, cell_size)\n yedges = np.arange(min_map, max_map, cell_size)\n heatmap_combined = None\n all_particles_x, all_particles_y = [], []\n for t in range(env.state.n_targets):\n\n particles_x, particles_y = pol2cart(\n abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))\n all_particles_x.extend(particles_x)\n all_particles_y.extend(particles_y)\n centroid_x = np.mean(particles_x)\n centroid_y = np.mean(particles_y)\n centroid_r, centroid_theta = cart2pol(centroid_x, centroid_y)\n target_r, target_theta, target_x, target_y = [], [], [], []\n for i in range(5):\n target_r.append(self.abs_target_hist[10*(i+1)-1][t][0])\n target_theta.append(np.radians(\n self.abs_target_hist[10*(i+1)-1][t][1]))\n target_x, target_y = pol2cart(target_r, target_theta)\n\n ax.plot(centroid_x, centroid_y, '*',\n label='centroid', markersize=12)\n\n ax.plot(target_x[4], target_y[4], 'X',\n label='target', markersize=12)\n sensor_r, sensor_theta, sensor_x, sensor_y = [], [], [], []\n for i in range(5):\n sensor_r.append(self.abs_sensor_hist[10*(i+1)-1][0])\n sensor_theta.append(np.radians(\n self.abs_sensor_hist[10*(i+1)-1][1]))\n sensor_x, sensor_y = pol2cart(sensor_r, sensor_theta)\n ax.plot(sensor_x[4], sensor_y[4], 'p',\n label='sensor', markersize=12)\n\n heatmap, xedges, yedges = np.histogram2d(\n all_particles_x, all_particles_y, bins=(xedges, yedges))\n heatmap = gaussian_filter(heatmap, sigma=8)\n extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]\n im = ax.imshow(heatmap.T, extent=extent, origin='lower',\n cmap='jet', interpolation='nearest')\n plt.colorbar(im)\n\n ax.legend(loc='center left', bbox_to_anchor=(\n 1.2, 0.5), fancybox=True, shadow=True,)\n ax.set_xlim(min_map, max_map)\n ax.set_ylim(min_map, max_map)\n if 6 in selected_plots:\n # Plot 1: Particle Plot (Polar)\n plot_count += 1\n if 6 not in axs:\n axs[6] = fig.add_subplot(1, len(selected_plots), plot_count)\n ax = axs[6]\n ax.clear()\n\n for t in range(env.state.n_targets):\n 
# plot particles\n plot_theta = np.radians(belief[:, t, 1])\n plot_r = belief[:, t, 0]\n particles_x, particles_y = pol2cart(\n belief[:, t, 0], np.radians(belief[:, t, 1]))\n ax.plot(particles_x, particles_y, 'o',\n color=color_array[t][0], markersize=4, markeredgecolor='black', label='particles', alpha=0.3, zorder=1)\n\n # plot targets\n plot_x_theta = np.radians(xp[t, 1])\n plot_x_r = xp[t, 0]\n\n ax.set_xlim(min_map, max_map)\n ax.set_ylim(min_map, max_map)\n\n sensor_x, sensor_y = pol2cart(\n self.sensor_hist[-1][0], np.radians(self.sensor_hist[-1][1]))\n if 7 in selected_plots:\n plot_count += 1\n if 7 not in axs:\n axs[7] = fig.add_subplot(1, len(selected_plots), plot_count)\n ax = axs[7]\n ax.clear()\n\n lines = [] # https://matplotlib.org/3.5.0/api/_as_gen/matplotlib.pyplot.legend.html\n for t in range(env.state.n_targets):\n particles_x, particles_y = pol2cart(\n abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))\n centroid_x = np.mean(particles_x)\n centroid_y = np.mean(particles_y)\n centroid_r, centroid_theta = cart2pol(centroid_x, centroid_y)\n target_r, target_theta, target_x, target_y = [], [], [], []\n\n for i in range(5):\n target_r.append(\n self.abs_target_hist[10*(i+1)-1][env.state.n_targets-1-t][0])\n target_theta.append(np.radians(\n self.abs_target_hist[10*(i+1)-1][env.state.n_targets-1-t][1]))\n target_x, target_y = pol2cart(target_r, target_theta)\n target_x, target_y = pol2cart(np.array(self.target_hist)[\n :, t, 0], np.radians(np.array(self.target_hist)[:, t, 1]))\n\n if len(self.target_hist) > 1:\n ax.plot(target_x[:-1], target_y[:-1], linewidth=4.0,\n color='limegreen', zorder=3, markersize=12)\n\n line0, = ax.plot(target_x[-1], target_y[-1], 'X', color='limegreen',\n markeredgecolor='black', label='targets', markersize=20, zorder=4)\n\n line1, = ax.plot(particles_x, particles_y, 'o',\n color=color_array[t][0], markersize=4, markeredgecolor='black', label='particles', alpha=0.3, zorder=1)\n #ax.plot(centroid_theta, centroid_r, '*', color=color_array[t][1],markeredgecolor='white', label='centroid', markersize=12, zorder=2)\n if t == 0:\n lines.extend([line0, line1])\n else:\n lines.extend([line0])\n\n sensor_x, sensor_y = pol2cart(np.array(self.sensor_hist)[\n :, 0], np.radians(np.array(self.sensor_hist)[:, 1]))\n if len(self.sensor_hist) > 1:\n ax.plot(sensor_x[:-1], sensor_y[:-1], linewidth=4.0,\n color='mediumorchid', zorder=3, markersize=12)\n\n line4, = ax.plot(sensor_x[-1], sensor_y[-1], 'H', color='mediumorchid',\n markeredgecolor='black', label='sensor', markersize=20, zorder=3)\n lines.extend([line4])\n ax.legend(handles=lines, loc='center left', bbox_to_anchor=(\n 1.08, 0.5), fancybox=True, shadow=True,)\n\n ax.set_xlim(min_map, max_map)\n ax.set_ylim(min_map, max_map)\n if 8 in selected_plots:\n plot_count += 1\n if 8 not in axs:\n axs[8] = fig.add_subplot(1, len(selected_plots), plot_count)\n ax = axs[8]\n ax.clear()\n\n lines = [] # https://matplotlib.org/3.5.0/api/_as_gen/matplotlib.pyplot.legend.html\n for t in range(env.state.n_targets):\n particles_x, particles_y = pol2cart(\n abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))\n centroid_x = np.mean(particles_x)\n centroid_y = np.mean(particles_y)\n centroid_r, centroid_theta = cart2pol(centroid_x, centroid_y)\n\n line1, = ax.plot(particles_x, particles_y, 'o',\n color=color_array[t][0], markersize=4, markeredgecolor='black', label='particles', alpha=0.3, zorder=1)\n if t == 0:\n lines.extend([line1])\n else:\n lines.extend([])\n\n sensor_x, sensor_y = 
pol2cart(np.array(self.sensor_hist)[\n :, 0], np.radians(np.array(self.sensor_hist)[:, 1]))\n if len(self.sensor_hist) > 1:\n ax.plot(sensor_x[:-1], sensor_y[:-1], linewidth=4.0,\n color='mediumorchid', zorder=3, markersize=12)\n\n line4, = ax.plot(sensor_x[-1], sensor_y[-1], 'H', color='mediumorchid',\n markeredgecolor='black', label='sensor', markersize=20, zorder=3)\n lines.extend([line4])\n ax.legend(handles=lines, loc='upper center', bbox_to_anchor=(\n 0.5, -0.05), fancybox=True, shadow=True, ncol=2)\n\n ax.set_xlim(min_map, max_map)\n ax.set_ylim(min_map, max_map)\n if textstr:\n props = dict(boxstyle='round',\n facecolor='palegreen', alpha=0.5)\n ax.text(1.04, 0.75, textstr[0], transform=ax.transAxes,\n fontsize=14, verticalalignment='top', bbox=props)\n props = dict(boxstyle='round',\n facecolor='paleturquoise', alpha=0.5)\n ax.text(1.04, 0.5, textstr[1], transform=ax.transAxes,\n fontsize=14, verticalalignment='top', bbox=props)\n\n png_filename = '{}/png/{}.png'.format(self.plot_dir, time_step)\n return axs\n\n def build_plots(self, xp=[], belief=[], abs_sensor=None, abs_target=None, abs_particles=None, time_step=None, fig=None, ax=None):\n print(belief.shape)\n if len(self.abs_target_hist) < self.history_length:\n self.abs_target_hist = [abs_target] * self.history_length\n self.abs_sensor_hist = [abs_sensor] * self.history_length\n else:\n self.abs_target_hist.pop(0)\n self.abs_target_hist.append(abs_target)\n self.abs_sensor_hist.pop(0)\n self.abs_sensor_hist.append(abs_sensor)\n\n fig = plt.figure(figsize=(30, 6))\n plt.tight_layout()\n # Put space between plots\n plt.subplots_adjust(wspace=0.2, hspace=0.2)\n\n # Plot 1: Particle Plot (Polar)\n ax = fig.add_subplot(1, 5, 1, polar=True)\n grid_r, grid_theta = [], []\n plot_r = [row[0] for row in belief]\n plot_theta = np.radians(np.array([row[1] for row in belief]))\n plot_x_theta = np.radians(xp[1])\n plot_x_r = xp[0]\n ax.plot(plot_theta, plot_r, 'ro')\n ax.plot(plot_x_theta, plot_x_r, 'bo')\n ax.set_ylim(-150, 150)\n ax.set_title('iteration {}'.format(time_step), fontsize=16)\n\n # Plot 2: Particle Plot (Polar) with Interpolation\n ax = fig.add_subplot(1, 5, 2, polar=True)\n # Create grid values first via histogram.\n nbins = 10\n counts, xbins, ybins = np.histogram2d(plot_theta, plot_r, bins=nbins)\n # Make a meshgrid for theta, r values\n tm, rm = np.meshgrid(xbins[:-1], ybins[:-1])\n # Build contour plot\n ax.contourf(tm, rm, counts)\n # True position\n ax.plot(plot_x_theta, plot_x_r, 'bo')\n ax.set_ylim(-150, 150)\n ax.set_title('Interpolated Belief'.format(time_step), fontsize=16)\n\n # Plot 3: Heatmap Plot (Cartesian)\n ax = fig.add_subplot(1, 5, 3)\n cart = np.array(\n list(map(pol2cart, belief[:, 0], np.radians(belief[:, 1]))))\n x = cart[:, 0]\n y = cart[:, 1]\n xedges = np.arange(-150, 153, 3)\n yedges = np.arange(-150, 153, 3)\n heatmap, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))\n heatmap = gaussian_filter(heatmap, sigma=5)\n extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]\n im = ax.imshow(heatmap.T, extent=extent,\n origin='lower', cmap='coolwarm')\n plt.colorbar(im)\n ax.set_xlim(-200, 200)\n ax.set_ylim(-200, 200)\n ax.set_title('Particle heatmap (relative to sensor)')\n\n # Plots 4 & 5: Absolute Particle/Sensor/Target Plot\n # particles/centroid coordinates\n particles_x, particles_y = pol2cart(\n abs_particles[:, 0], np.radians(abs_particles[:, 1]))\n centroid_x = np.mean(particles_x)\n centroid_y = np.mean(particles_y)\n centroid_r, centroid_theta = cart2pol(centroid_x, 
centroid_y)\n sensor_r, sensor_theta, sensor_x, sensor_y = [], [], [], []\n target_r, target_theta, target_x, target_y = [], [], [], []\n for i in range(5):\n sensor_r.append(self.abs_sensor_hist[10*(i+1)-1][0])\n sensor_theta.append(np.radians(\n self.abs_sensor_hist[10*(i+1)-1][1]))\n target_r.append(self.abs_target_hist[10*(i+1)-1][0])\n target_theta.append(np.radians(\n self.abs_target_hist[10*(i+1)-1][1]))\n sensor_x[i], sensor_y[i] = pol2cart(sensor_r, sensor_theta)\n target_x[i], target_y[i] = pol2cart(target_r, target_theta)\n\n # Plot 4: Absolute Polar coordinates\n ax = fig.add_subplot(1, 5, 4, polar=True)\n ax.plot(np.radians(\n abs_particles[:, 1]), abs_particles[:, 0], 'ro', label='particles', alpha=0.5)\n ax.plot(centroid_theta, centroid_r, 'c*',\n label='centroid', markersize=12)\n ax.plot(sensor_theta[4], sensor_r[4], 'gp',\n label='sensor', markersize=12)\n ax.plot(target_theta[4], target_r[4], 'bX',\n label='target', markersize=12)\n for i in range(4):\n ax.plot(sensor_theta[i], sensor_r[i],\n 'gp', markersize=6, alpha=0.75)\n ax.plot(target_theta[i], target_r[i],\n 'bX', markersize=6, alpha=0.75)\n ax.legend()\n ax.set_title('Absolute positions (polar)'.format(\n time_step), fontsize=16)\n\n # Plot 5: Absolute Cartesian coordinates\n ax = fig.add_subplot(1, 5, 5)\n xedges = np.arange(-100, 103, 3)\n yedges = np.arange(-100, 103, 3)\n heatmap, xedges, yedges = np.histogram2d(\n particles_x, particles_y, bins=(xedges, yedges))\n heatmap = gaussian_filter(heatmap, sigma=2)\n extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]\n im = ax.imshow(heatmap.T, extent=extent,\n origin='lower', cmap='coolwarm')\n plt.colorbar(im)\n ax.plot(centroid_x, centroid_y, 'c*', label='centroid', markersize=12)\n ax.plot(sensor_x[4], sensor_y[4], 'gp', label='sensor', markersize=12)\n ax.plot(target_x[4], target_y[4], 'bX', label='target', markersize=12)\n for i in range(4):\n ax.plot(sensor_x[i], sensor_y[i], 'gp', markersize=6, alpha=0.55)\n ax.plot(target_x[i], target_y[i], 'bX', markersize=6, alpha=0.55)\n ax.legend()\n ax.set_xlim(-150, 150)\n ax.set_ylim(-150, 150)\n ax.set_title('Absolute positions (cartesian)'.format(\n time_step), fontsize=16)\n\n r_error, theta_error, heading_error, centroid_distance_error, rmse, mae = tracking_error(\n abs_target, abs_particles)\n\n png_filename = '{}/png/{}.png'.format(self.plot_dir, time_step)\n print('saving plots in {}'.format(png_filename))\n plt.savefig(png_filename)\n plt.close(fig)\n\n\n##################################################################\n# Logging\n##################################################################\ndef write_header_log(config, method, global_start_time):\n\n if type(config) == configparser.ConfigParser:\n config2log = {section: dict(config[section])\n for section in config.sections()}\n else:\n config2log = dict(config)\n\n # write output header\n if not os.path.isdir(f'{RUN_DIR}/{method}/'):\n os.makedirs(f'{RUN_DIR}/{method}/')\n header_filename = f'{RUN_DIR}/{method}/{global_start_time}_header.txt'\n with open(header_filename, 'w', encoding='UTF-8') as f:\n f.write(json.dumps(config2log))\n\n\ndef read_header_log(filename):\n with open(filename, 'r', encoding='UTF-8') as f:\n config = json.load(f)\n return config\n\n\ndef particles_mean_belief(particles):\n particles_r = particles[:, 0]\n particles_theta = np.radians(particles[:, 1])\n particles_x, particles_y = pol2cart(particles_r, particles_theta)\n\n # centroid of particles x,y\n mean_x = np.mean(particles_x)\n mean_y = np.mean(particles_y)\n\n # 
centroid of particles r,theta\n mean_r, mean_theta = cart2pol(mean_x, mean_y)\n\n particles_heading = particles[:, 2]\n particles_heading_rad = np.radians(particles_heading)\n mean_heading_rad = np.arctan2(\n np.mean(np.sin(particles_heading_rad)), np.mean(np.cos(particles_heading_rad)))\n mean_heading = np.degrees(mean_heading_rad)\n\n mean_spd = np.mean(particles[:, 3])\n\n return particles_x, particles_y, mean_x, mean_y, mean_r, mean_theta, mean_heading, mean_spd\n\n\ndef particles_centroid_xy(particles):\n particles_r = particles[:, 0]\n particles_theta = np.radians(particles[:, 1])\n particles_x, particles_y = pol2cart(particles_r, particles_theta)\n\n # centroid of particles x,y\n mean_x = np.mean(particles_x)\n mean_y = np.mean(particles_y)\n\n return [mean_x, mean_y]\n\n\ndef angle_diff(angle):\n\n diff = angle % 360\n\n diff = (diff + 360) % 360\n\n diff[diff > 180] -= 360\n return diff\n\n\n\ndef tracking_error(all_targets, all_particles):\n \"\"\"\n Calculate different tracking errors\n \"\"\"\n results = []\n n_targets = len(all_particles[0])//4\n\n # reorder targets to fit closest particles\n min_distance = None\n optimal_target_permutation = None\n\n for idxs in list(permutations(range(n_targets))):\n target_permutation = all_targets[list(idxs)]\n\n distance = 0\n for t in range(n_targets):\n particle_centroid = np.array(\n particles_centroid_xy(all_particles[:, 4*t:4*(t+1)]))\n target = np.array(\n pol2cart(target_permutation[t][0], np.radians(target_permutation[t][1])))\n distance += np.linalg.norm(particle_centroid-target)**2\n if min_distance is None or distance < min_distance:\n min_distance = distance\n optimal_target_permutation = target_permutation\n\n for t in range(n_targets):\n target = optimal_target_permutation[t]\n particles = all_particles[:, 4*t:4*(t+1)]\n\n target_r = target[0]\n target_theta = np.radians(target[1])\n target_heading = target[2]\n target_x, target_y = pol2cart(target_r, target_theta)\n\n particles_x, particles_y, mean_x, mean_y, mean_r, mean_theta, mean_heading, mean_spd = particles_mean_belief(\n particles)\n\n r_error = np.mean(np.abs(target_r - particles[:, 0]))\n theta_error = np.mean(np.abs(angle_diff(target[1] - particles[:, 1])))\n heading_diff = np.abs(np.mean(target_heading - particles[:, 2])) % 360\n heading_error = heading_diff if heading_diff <= 180 else 360-heading_diff\n\n # centroid euclidean distance error x,y\n centroid_distance_error = np.sqrt(\n (mean_x - target_x)**2 + (mean_y - target_y)**2)\n\n mae = np.mean(np.sqrt((particles_x-target_x) **\n 2 + (particles_y - target_y)**2))\n\n # root mean square error\n rmse = np.sqrt(np.mean((particles_x - target_x) **\n 2 + (particles_y - target_y)**2))\n\n results.append([r_error, theta_error, heading_error,\n centroid_distance_error, rmse, mae])\n results = np.array(results).T\n\n r_error = results[0]\n theta_error = results[1]\n heading_error = results[2]\n centroid_distance_error = results[3]\n rmse = results[4]\n mae = results[5]\n\n return r_error, theta_error, heading_error, centroid_distance_error, rmse, mae\n"
] | [
[
"numpy.dot",
"numpy.radians",
"numpy.sqrt",
"numpy.linspace",
"numpy.flipud",
"pandas.DataFrame",
"numpy.arctan2",
"numpy.mean",
"scipy.ndimage.filters.gaussian_filter",
"numpy.hstack",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"numpy.sin",
"numpy.copy",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.meshgrid",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.histogram2d",
"numpy.abs",
"numpy.degrees",
"numpy.cos",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.draw",
"numpy.linalg.norm",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.pause"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |