repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
brunompacheco/part-counting | [
"dbf71e7465c8e384e3c60694f65819a65742193b"
] | [
"src/features/base.py"
] | [
"from pathlib import Path\n\nimport numpy as np\nimport open3d as o3d\nimport torch\n\nfrom src.data.rgbd import load_rgbd\nfrom src.data.pcd import load_pcd\nfrom .cropping import mask_selection_volume, box_mask_from_rgbd\n\n\ndef preprocess_box_for_cv(img_fpath: Path) -> o3d.geometry.PointCloud:\n \"\"\"Load and strip walls of box, keeping the interior. For CV-based models.\n\n The mask of the interior of the box is extracted using Canny+Hough, which\n is then used to crop the point cloud generated from the RGBD image.\n\n Args:\n img_fpath: Filepath of the .exr image file. Must contain grayscale as\n the first channel and depth as second channel.\n \n Returns:\n box: Point cloud image of the interior of the box.\n \"\"\"\n rgbd = load_rgbd(img_fpath)\n\n box_mask = box_mask_from_rgbd(rgbd)\n\n vol = mask_selection_volume(rgbd, box_mask)\n\n pcd = load_pcd(rgbd)\n\n box = vol.crop_point_cloud(pcd)\n\n return box\n\ndef load_part_model(part_fpath: Path, number_of_points=10000) -> o3d.geometry.PointCloud:\n \"\"\"Load part model as a point cloud image in meters.\n\n Args:\n part_fpath: Filepath of the .stl model file.\n number_of_points: For the resulting point cloud, which is sampled\n uniformly.\n \n Returns:\n part: Point cloud of the part, sampled uniformly.\n \"\"\"\n part_mesh = o3d.io.read_triangle_mesh(str(part_fpath), enable_post_processing=True)\n\n part_mesh.paint_uniform_color([1., 0., 0.,])\n\n part = part_mesh.sample_points_uniformly(number_of_points=number_of_points)\n\n part_points = np.array(part.points) / 1000 # mm to meter conversion\n part_points = part_points + np.array([0,0,0.3])\n part_points = o3d.utility.Vector3dVector(part_points)\n part.points = part_points\n\n return part\n\ndef preprocess_box_for_dl(img_fpath: Path, device: torch.device = None) -> torch.Tensor:\n \"\"\"Load box picture and reshape it. For DL-based models.\n\n Args:\n img_fpath: Filepath of the .png image file.\n device: Torch device where to load the image.\n\n Returns:\n X: Image loaded in a batch-like format (batch with a single sample),\n proper for feeding to a model.\n \"\"\"\n from torchvision import transforms as T\n\n if device is None:\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n transform = T.Compose([\n T.ToTensor(),\n # normalize required for pre-trained image models,\n # check https://pytorch.org/vision/stable/models.html\n T.Normalize(mean=[0.485, 0.456], std=[0.229, 0.224]),\n ])\n\n data = np.array(o3d.io.read_image(str(img_fpath)))\n data = data[:,:,1:3]\n\n X = transform(data)\n X = X.unsqueeze(0)\n X = X.type(torch.FloatTensor)\n X = X.to(device)\n\n return X\n"
] | [
[
"numpy.array",
"torch.cuda.is_available",
"torch.device"
]
] |
carefree0910/carefree-flow | [
"7035015a072cf8142074d01683889f90950d2939"
] | [
"cflow/misc/internal_/data.py"
] | [
"import os\n\nimport numpy as np\n\nfrom abc import abstractmethod\nfrom abc import ABCMeta\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Type\nfrom typing import Tuple\nfrom typing import Callable\nfrom typing import Optional\nfrom cftool.misc import Saving\nfrom oneflow.utils.data import Dataset\nfrom oneflow.utils.data import SequentialSampler\nfrom oneflow.utils.data import DataLoader as FlowDataLoader\n\nfrom ...types import arrays_type\nfrom ...types import sample_weights_type\nfrom ...protocol import DatasetProtocol\nfrom ...protocol import DataLoaderProtocol\nfrom ...misc.toolkit import to_flow\nfrom ...misc.toolkit import WithRegister\n\n\ndata_modules: Dict[str, Type[\"DataModule\"]] = {}\n\n\nclass DataModule(WithRegister, metaclass=ABCMeta):\n d: Dict[str, Type[\"DataModule\"]] = data_modules\n\n id_file = \"id.txt\"\n info_name = \"info\"\n package_folder = \"data_module\"\n\n @property\n @abstractmethod\n def info(self) -> Dict[str, Any]:\n pass\n\n def prepare(self, sample_weights: sample_weights_type) -> None:\n pass\n\n def initialize(self) -> Any:\n pass\n\n def save(self, folder: str) -> None:\n folder = os.path.join(folder, self.package_folder)\n os.makedirs(folder, exist_ok=True)\n with open(os.path.join(folder, self.id_file), \"w\") as f:\n f.write(self.__identifier__)\n self.save_info(folder)\n\n def save_info(self, folder: str) -> None:\n Saving.save_dict(self.info, self.info_name, folder)\n\n @classmethod\n def load(cls, folder: str) -> Dict[str, Any]:\n folder = os.path.join(folder, cls.package_folder)\n with open(os.path.join(folder, cls.id_file), \"r\") as f:\n base = cls.get(f.read())\n return base.load_info(folder)\n\n @classmethod\n def load_info(cls, folder: str) -> Dict[str, Any]:\n return Saving.load_dict(cls.info_name, folder)\n\n\[email protected](\"dl\")\nclass DLDataModule(DataModule, metaclass=ABCMeta):\n train_loader: DataLoaderProtocol\n valid_loader: Optional[DataLoaderProtocol]\n\n def initialize(self) -> Tuple[DataLoaderProtocol, Optional[DataLoaderProtocol]]:\n pass\n\n\ndef get_weighted_indices(n: int, weights: Optional[np.ndarray]) -> np.ndarray:\n indices = np.arange(n)\n if weights is not None:\n numbers = np.random.multinomial(n, weights)\n indices = indices.repeat(numbers)\n return indices\n\n\[email protected](\"ml\")\nclass MLDataset(DatasetProtocol):\n def __init__(self, x: np.ndarray, y: Optional[np.ndarray]):\n super().__init__()\n self.x = x\n self.y = y\n\n def __len__(self) -> int:\n return len(self.x)\n\n\[email protected](\"ml\")\nclass MLLoader(DataLoaderProtocol):\n data: MLDataset\n cursor: int\n indices: np.ndarray\n\n def __init__(\n self,\n data: MLDataset,\n shuffle: bool,\n *,\n name: Optional[str] = None,\n batch_size: int = 128,\n sample_weights: Optional[np.ndarray] = None,\n ):\n if sample_weights is not None and len(data) != len(sample_weights):\n raise ValueError(\n f\"the number of data samples ({len(data)}) is not identical with \"\n f\"the number of sample weights ({len(sample_weights)})\"\n )\n super().__init__(sample_weights=sample_weights)\n self.data = data\n self.shuffle = shuffle\n self.shuffle_backup = shuffle\n self.name = name\n self.batch_size = batch_size\n\n def __iter__(self) -> \"MLLoader\":\n self.cursor = 0\n self.indices = get_weighted_indices(len(self.data), self.sample_weights)\n if self.shuffle:\n np.random.shuffle(self.indices)\n return self\n\n def __next__(self) -> arrays_type:\n start = self.cursor\n if start >= len(self.data):\n raise StopIteration\n self.cursor += 
self.batch_size\n indices = self.indices[start : self.cursor]\n return (\n to_flow(self.data.x[indices]),\n None if self.data.y is None else to_flow(self.data.y[indices]),\n to_flow(indices),\n )\n\n def disable_shuffle(self) -> None:\n self.shuffle = False\n\n def recover_shuffle(self) -> None:\n self.shuffle = self.shuffle_backup\n\n def copy(self) -> \"MLLoader\":\n return MLLoader(\n self.data,\n self.shuffle,\n name=self.name,\n batch_size=self.batch_size,\n sample_weights=self.sample_weights,\n )\n\n\[email protected](\"cv\")\nclass CVDataset(DatasetProtocol):\n def __init__(self, dataset: Dataset):\n super().__init__()\n self.dataset = dataset\n\n def __len__(self) -> int:\n return len(self.dataset) # type: ignore\n\n def __getitem__(self, item: Any) -> Any:\n return self.dataset[item]\n\n\nclass DataLoader(FlowDataLoader):\n def __setattr__(self, attr: str, val: Any) -> None:\n if self.__initialized and attr in (\n \"batch_size\",\n \"batch_sampler\",\n \"drop_last\",\n \"dataset\",\n \"persistent_workers\",\n ):\n raise ValueError(\n f\"{attr} attribute should not be set after \"\n f\"{self.__class__.__name__} is initialized\"\n )\n\n super(FlowDataLoader, self).__setattr__(attr, val)\n\n\[email protected](\"cv\")\nclass CVLoader(DataLoaderProtocol):\n data: CVDataset\n\n def __init__(\n self,\n loader: DataLoader,\n batch_callback: Optional[Callable[[Any], arrays_type]] = None,\n *,\n sample_weights: Optional[np.ndarray] = None,\n ):\n if sample_weights is not None:\n raise ValueError(\n \"in `CVLoader`, we should introduce `sample_weights` to the original \"\n \"OneFlow `DataLoader` (by specifying corresponding samplers)\"\n )\n super().__init__(sample_weights=sample_weights)\n self.loader = loader\n self.data = loader.dataset # type: ignore\n self.batch_callback = batch_callback\n self.sampler_backup = loader.sampler\n self._iterator: Optional[Any] = None\n\n def __iter__(self) -> \"CVLoader\":\n self._iterator = self.loader.__iter__()\n return self\n\n def __next__(self) -> arrays_type:\n batch = self._iterator.__next__() # type: ignore\n if self.batch_callback is None:\n return batch\n return self.batch_callback(batch)\n\n def __len__(self) -> int:\n return len(self.loader)\n\n @property\n def batch_size(self) -> int: # type: ignore\n # TODO : consider world size\n # batch_size = self.loader.batch_size\n # if dist.is_initialized():\n # batch_size *= dist.get_world_size()\n # return batch_size\n return self.loader.batch_size\n\n def copy(self) -> \"CVLoader\":\n dataset = self.data.dataset\n self.data.__dict__.pop(\"dataset\")\n copied = super().copy()\n assert isinstance(copied, CVLoader)\n self.data.dataset = copied.data.dataset = dataset\n return copied\n\n def disable_shuffle(self) -> None:\n sampler = SequentialSampler(self.data)\n self.loader.sampler = sampler\n if hasattr(self.loader, \"batch_sampler\"):\n self.loader.batch_sampler.sampler = sampler\n\n def recover_shuffle(self) -> None:\n self.loader.sampler = self.sampler_backup\n if hasattr(self.loader, \"batch_sampler\"):\n self.loader.batch_sampler.sampler = self.sampler_backup\n\n\n__all__ = [\n \"DataModule\",\n \"DLDataModule\",\n \"MLDataset\",\n \"MLLoader\",\n \"CVDataset\",\n \"CVLoader\",\n \"DataLoader\",\n \"get_weighted_indices\",\n]\n"
] | [
[
"numpy.arange",
"numpy.random.shuffle",
"numpy.random.multinomial"
]
] |
TheMarex/charge | [
"85e35f7a6c8b8c161ecd851124d1363d5a450573"
] | [
"src/python/numeric.py"
] | [
"import numpy as np\n\nfrom functions import make_piecewise_linear, PiecewiseFunction, LinearFunction\n\ndef link_consumption(T, f, g, M):\n L = f(T)\n R = g(T)\n max_t_idx = np.iinfo(np.dtype('uint32')).max\n opt_H = np.full_like(L, float('inf'))\n opt_delta_idx = np.full_like(L, max_t_idx, dtype='uint32')\n for d_idx in range(len(L)):\n R_d = np.roll(R, d_idx)\n R_d[:d_idx] = float('inf')\n if L[d_idx] >= float('inf'):\n continue\n H_d = np.maximum(0, L[d_idx] + R_d)\n H_d[R_d >= float('inf')] = float('inf')\n index = opt_H > H_d\n opt_H[index] = H_d[index]\n opt_delta_idx[index] = d_idx\n opt_H[opt_H > M] = float('inf')\n opt_delta_idx[opt_H > M] = max_t_idx\n\n opt_delta = np.full_like(T, float('inf'), dtype='float')\n opt_delta[opt_delta_idx < max_t_idx] = T[opt_delta_idx[opt_delta_idx < max_t_idx]]\n\n d = PiecewiseFunction(T, np.concatenate((make_piecewise_linear(T, opt_delta), [LinearFunction(0, opt_delta[-1])])))\n h = PiecewiseFunction(T, np.concatenate((make_piecewise_linear(T, opt_H), [LinearFunction(0, opt_H[-1])])))\n\n return d, h\n\ndef link_charging(T, f, cf, M):\n L = f(T)\n CF = cf(T)\n max_t_idx = np.iinfo(np.dtype('uint32')).max\n opt_H = np.full_like(L, float('inf'))\n opt_delta_idx = np.full_like(L, max_t_idx, dtype='uint32')\n ts = []\n for d_idx in range(len(L)):\n if L[d_idx] >= float('inf'):\n continue\n y = M - L[d_idx]\n if y < 0:\n continue\n assert(y <= M)\n t_idx = np.argmax(CF > y)\n CF_y = np.roll(CF, -t_idx)\n CF_y[-t_idx:] = CF[-1]\n assert(len(CF_y) == len(L))\n CF_d = np.roll(CF_y, d_idx)\n CF_d[:d_idx] = -float('inf')\n H_d = np.maximum(0, M - CF_d)\n index = opt_H > H_d\n opt_H[index] = H_d[index]\n opt_delta_idx[index] = d_idx\n opt_H[opt_H > M] = float('inf')\n opt_delta_idx[opt_H > M] = max_t_idx\n\n print(list(ts))\n\n opt_delta = np.full_like(T, float('inf'), dtype='float')\n opt_delta[opt_delta_idx < max_t_idx] = T[opt_delta_idx[opt_delta_idx < max_t_idx]]\n\n d = PiecewiseFunction(T, np.concatenate((make_piecewise_linear(T, opt_delta), [LinearFunction(0, opt_delta[-1])])))\n h = PiecewiseFunction(T, np.concatenate((make_piecewise_linear(T, opt_H), [LinearFunction(0, opt_H[-1])])))\n return d, h\n"
] | [
[
"numpy.full_like",
"numpy.roll",
"numpy.dtype",
"numpy.argmax",
"numpy.maximum"
]
] |
NengLu/topopy | [
"df61e8133ca921daf7d9980d122a2afc5e1ad925"
] | [
"test/temp/test_07_Network_stream_poi.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on September 25, 2018\nTesting suite for topopy.Flow.get_stream_poi() function\n@author: J. Vicente Perez\n@email: [email protected]\n@date: September 25, 2018\n\"\"\"\n\nimport unittest\nimport sys\nimport numpy as np\n# Add to the path code folder and data folder\nsys.path.append(\"../\")\nfrom topopy import Flow, DEM, Network\ninfolder = \"data/in\"\n\nclass StreamPoiTest(unittest.TestCase):\n \n# def test_stream_poi_01(self):\n# dem_files = ['tunez.tif', 'small25.tif', \"jebja30.tif\"]\n# for file in dem_files:\n# dem = DEM(infolder + \"/\" + file)\n# fd = Flow(dem)\n# thr = int(fd.get_ncells() * 0.01)\n# net = Network(fd, dem, thr)\n# \n# out01 = fd.get_stream_poi(thr, \"heads\", \"CELL\")\n# out02 = net.get_stream_poi(\"heads\", \"CELL\")\n# \n# computed = np.array_equal(out01, out02)\n# self.assertEqual(computed, True)\n \n def test_stream_poi_02(self):\n dem_files = ['tunez.tif', 'small25.tif', \"jebja30.tif\"]\n for file in dem_files:\n dem = DEM(infolder + \"/\" + file)\n fd = Flow(dem)\n thr = int(fd.get_ncells() * 0.01)\n net = Network(fd, dem, thr)\n \n out01 = fd.get_stream_poi(thr, \"confluences\", \"CELL\")\n out02 = net.get_stream_poi(\"confluences\", \"CELL\")\n \n computed = np.array_equal(out01, out02)\n print(file)\n self.assertEqual(computed, True)\n\n# \n# def test_stream_poi_03(self):\n# dem_files = ['tunez.tif', 'small25.tif', \"jebja30.tif\"]\n# for file in dem_files:\n# dem = DEM(infolder + \"/\" + file)\n# fd = Flow(dem)\n# thr = int(fd.get_ncells() * 0.01)\n# net = Network(fd, dem, thr)\n# \n# out01 = fd.get_stream_poi(thr, \"outlets\", \"CELL\")\n# out02 = net.get_stream_poi(\"outlets\", \"CELL\")\n# \n# computed = np.array_equal(out01, out02)\n# self.assertEqual(computed, True)\n\n\nif __name__ == \"__main__\":\n unittest.main()"
] | [
[
"numpy.array_equal"
]
] |
Foltrex/bsu | [
"769ddac58eddd5877e40949227998575fd4dec77"
] | [
"architecture/lab3-poisson/task.py"
] | [
"from math import sin\n\nimport numpy as np\nfrom mpi4py import MPI\n\n\nclass Task:\n f = staticmethod(lambda x, y: x * y)\n f_left = f1 = staticmethod(lambda y: y ** 2)\n f_right = f2 = staticmethod(lambda y: sin(y))\n f_bottom = f3 = staticmethod(lambda x: x ** 3)\n f_top = f4 = staticmethod(lambda x: x ** 2)\n\n\nclass Point:\n def __init__(self, x, y):\n self.x, self.y = x, y\n\n def __repr__(self):\n return '({0:.2f}, {1:.2f})'.format(self.x, self.y)\n\n\nclass Index:\n def __init__(self, rows, rows_start, cols, cols_start):\n self.rows, self.rows_start, self.cols, self.cols_start = rows, rows_start, cols, cols_start\n\n\nclass Region:\n def __init__(self, top=0, right=0, bottom=0, left=0):\n self.top, self.right, self.bottom, self.left = top, right, bottom, left\n\n def __repr__(self):\n return '{' \\\n + 't: {0}, r: {1}, b: {2}, l: {3}'.format(self.top, self.right, self.bottom, self.left) \\\n + '}'\n\n\nclass ProcSquare:\n def __init__(self, full_region, region, left_top, step_x, step_y):\n self.full_region, self.region, self.left_top, self.step_x, self.step_y = full_region, region, left_top, step_x, step_y\n self.rows = region.bottom - region.top\n self.cols = region.right - region.left\n self.calc_region = Region(\n top=int(region.top == full_region.top),\n left=int(region.left == full_region.left),\n right=self.cols - int(region.right == full_region.right),\n bottom=self.rows - int(region.bottom == full_region.bottom)\n )\n self.diff = 0.0\n\n if self.rows > 0 and self.cols > 0:\n self.top_border = np.zeros(self.cols, dtype=np.float64)\n self.left_border = np.zeros(self.rows, dtype=np.float64)\n self.bottom_border = np.zeros(self.cols, dtype=np.float64)\n self.right_border = np.zeros(self.rows, dtype=np.float64)\n\n self.sqr_step_x = self.step_x * self.step_x\n self.sqr_step_y = self.step_y * self.step_y\n self.weight = 1. / (2 * (1. / self.sqr_step_x + 1. 
/ self.sqr_step_y))\n\n if self.region.top == self.full_region.top:\n for j in range(self.cols):\n self.set(0, j, Task.f_top(left_top.x + step_x * j))\n else:\n self.neighbor_top_border = np.zeros(self.cols, dtype=np.float64)\n\n if region.bottom == full_region.bottom:\n for j in range(self.cols):\n self.set(self.rows - 1, j, Task.f_bottom(left_top.x + step_x * j))\n else:\n self.neighbor_bottom_border = np.zeros(self.cols, dtype=np.float64)\n\n if region.left == full_region.left:\n for i in range(self.rows):\n self.set(i, 0, Task.f_left(left_top.y + step_y * i))\n else:\n self.neighbor_left_border = np.zeros(self.rows, dtype=np.float64)\n\n if region.right == full_region.right:\n for i in range(self.rows):\n self.set(i, self.cols - 1, Task.f_right(left_top.y + step_y * i))\n else:\n self.neighbor_right_border = np.zeros(self.rows, dtype=np.float64)\n\n if self.rows > 2 and self.cols > 2:\n self.inner_lines = []\n for i in range(self.rows - 2):\n self.inner_lines.append(np.zeros(self.cols - 2, dtype=np.float64))\n\n def get(self, i, j):\n if j == -1:\n return self.neighbor_left_border[i]\n elif j == self.cols:\n return self.neighbor_right_border[i]\n elif i == -1:\n return self.neighbor_top_border[j]\n elif i == self.rows:\n return self.neighbor_bottom_border[j]\n elif j == 0:\n return self.left_border[i]\n elif j == self.cols - 1:\n return self.right_border[i]\n elif i == 0:\n return self.top_border[j]\n elif i == self.rows - 1:\n return self.bottom_border[j]\n else:\n return self.inner_lines[i - 1][j - 1]\n\n def set(self, i, j, val):\n if j == -1:\n self.neighbor_left_border[i] = val\n elif j == self.cols:\n self.neighbor_right_border[i] = val\n elif i == -1:\n self.neighbor_top_border[j] = val\n elif i == self.rows:\n self.neighbor_bottom_border[j] = val\n else:\n if j == 0:\n self.left_border[i] = val\n\n if j == self.cols - 1:\n self.right_border[i] = val\n\n if i == 0:\n self.top_border[j] = val\n\n if i == self.rows - 1:\n self.bottom_border[j] = val\n\n if (0 < i < self.rows - 1) and (0 < j < self.cols - 1):\n self.inner_lines[i - 1][j - 1] = val\n\n def exch(self, comm):\n left, right = comm.Shift(1, 1)\n top, bottom = comm.Shift(0, 1)\n\n if top != MPI.PROC_NULL:\n comm.send(self.top_border, dest=top)\n\n if bottom != MPI.PROC_NULL:\n self.neighbor_bottom_border = comm.recv(source=bottom)\n\n if bottom != MPI.PROC_NULL:\n comm.send(self.bottom_border, dest=bottom)\n\n if top != MPI.PROC_NULL:\n self.neighbor_top_border = comm.recv(source=top)\n\n if right != MPI.PROC_NULL:\n comm.send(self.right_border, dest=right)\n\n if left != MPI.PROC_NULL:\n self.neighbor_left_border = comm.recv(source=left)\n\n if left != MPI.PROC_NULL:\n comm.send(self.left_border, dest=left)\n\n if right != MPI.PROC_NULL:\n self.neighbor_right_border = comm.recv(source=right)\n\n comm.barrier()\n\n def calc(self):\n self.diff = 0.0\n\n for i in range(self.calc_region.top, self.calc_region.bottom):\n for j in range(self.calc_region.left, self.calc_region.right):\n x = self.left_top.x + j * self.step_x\n y = self.left_top.y + i * self.step_y\n val = self.weight * (\n (self.get(i + 1, j) + self.get(i - 1, j)) / self.sqr_step_x +\n (self.get(i, j + 1) + self.get(i, j - 1)) / self.sqr_step_y -\n Task.f(x, y)\n )\n self.diff = max(self.diff, abs(self.get(i, j) - val))\n self.set(i, j, val=val)\n\n def data(self):\n temp = np.zeros(self.cols * self.rows, dtype=np.float64)\n\n for i in range(self.rows):\n for j in range(self.cols):\n temp[i * self.cols + j] = self.get(i, j)\n\n return temp\n"
] | [
[
"numpy.zeros"
]
] |
georgiarichards/otc_codeine | [
"a05a6d23b24c250fb0f8cc5367919a12979870c5"
] | [
"figures_otc.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # This notebook graphs the sales and expenditure data of OTC codeine for 31 countries\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n \n#and make the plots appear in the notebook\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# # Import data\n\n# In[2]:\n\n\ndf1 = pd.read_csv(\"/Users/georgiarichards/Desktop/Python/OTC/data_otc_long.csv\")\ndf1.head()\n\n\n# In[3]:\n\n\ndf1.info()\n\n\n# In[4]:\n\n\ndf1.describe()\n\n\n# # Graphing data\n\n# In[5]:\n\n\n# this code increases the size of the figures \nfig_size = plt.rcParams[\"figure.figsize\"]\nfig_size[0] = 10\nfig_size[1] = 6\nplt.rcParams[\"figure.figsize\"] = fig_size\n\n\n# # COUNTS - all items sold \n\n# In[6]:\n\n\n# doseage units sold per 1000 of the population in 31 countries - adjusted using IQVIA coverage\nfig1a = sns.lineplot(data=df1, \n x='year', y='countpopadj', \n hue='Country',\n palette=\"bright\",\n marker='o') \nplt.xlabel(\" \")\nplt.ylabel(\"Adjusted dosage units per 1000 population\", fontsize= 12)\nplt.legend(loc='upper right', bbox_to_anchor=(1.2, 1))\nplt.savefig('fig1a.png')\nfig1a\n\n\n# In[7]:\n\n\n# Now I drop countries ranked 9-31 to graph the top 8 countries for sales volumes\ndf2 = df1\ndf2 = df2.set_index(\"country2\")\ndf2 = df2.drop(\"Serbia\")\ndf2 = df2.drop(\"Switzerland\")\ndf2 = df2.drop(\"Estonia\")\ndf2 = df2.drop(\"Netherlands\")\ndf2 = df2.drop(\"Finland\")\ndf2 = df2.drop(\"Romania\")\ndf2 = df2.drop(\"Bulgaria\")\ndf2 = df2.drop(\"Slovakia\")\ndf2 = df2.drop(\"Slovenia\")\ndf2 = df2.drop(\"Lithuania\")\ndf2 = df2.drop(\"Belgium\")\ndf2 = df2.drop(\"Mexico\")\ndf2 = df2.drop(\"Russia\")\ndf2 = df2.drop(\"Canada\")\ndf2 = df2.drop(\"USA\")\ndf2 = df2.drop(\"Greece\")\ndf2 = df2.drop(\"Thailand\")\ndf2 = df2.drop(\"Germany\")\ndf2 = df2.drop(\"Argentina\")\ndf2 = df2.drop(\"Italy\")\ndf2 = df2.drop(\"Portugal\")\ndf2 = df2.drop(\"Brazil\")\ndf2 = df2.drop(\"Spain\")\ndf2.head()\n\n\n# In[8]:\n\n\n# graphing the top 8 countries [by mean sales of OTC codeine] - adjusted using IQVIA coverage\n\nplt.figure(figsize=(10,6))\n\nfig2a = sns.lineplot(data=df2, \n x=\"year\", y=\"countpopadj\", \n hue=\"Country\", palette=\"bright\",\n style=\"Country\",\n markers=True, dashes=False) \n\n\nplt.xlabel(\" \")\nplt.ylabel(\"Adjusted dosage units per 1000 population\", fontsize= 15)\nplt.legend(loc='upper right', bbox_to_anchor=(1.2, 1))\nplt.savefig('fig2a.png')\nfig2a\n\n\n# In[9]:\n\n\n# Now I drop countries ranked 1-8 and 17-31 to graph the next 8 countries for sales volumes\ndf3 = df1 \ndf3 = df3.set_index(\"country2\")\ndf3 = df3.drop(\"South Africa\")\ndf3 = df3.drop(\"Ireland\")\ndf3 = df3.drop(\"France\")\ndf3 = df3.drop(\"UK\")\ndf3 = df3.drop(\"Latvia\")\ndf3 = df3.drop(\"Japan\")\ndf3 = df3.drop(\"Croatia\")\ndf3 = df3.drop(\"Poland\")\ndf3 = df3.drop(\"Slovenia\")\ndf3 = df3.drop(\"Lithuania\")\ndf3 = df3.drop(\"Belgium\")\ndf3 = df3.drop(\"Mexico\")\ndf3 = df3.drop(\"Russia\")\ndf3 = df3.drop(\"Canada\")\ndf3 = df3.drop(\"USA\")\ndf3 = df3.drop(\"Greece\")\ndf3 = df3.drop(\"Thailand\")\ndf3 = df3.drop(\"Germany\")\ndf3 = df3.drop(\"Argentina\")\ndf3 = df3.drop(\"Italy\")\ndf3 = df3.drop(\"Portugal\")\ndf3 = df3.drop(\"Brazil\")\ndf3 = df3.drop(\"Spain\")\ndf3.head()\n\n\n# In[10]:\n\n\n# graphing countries ranked 9-16 for mean volume sales of OTC codeine - adjusted with IQVIA coverage\nfig2b = sns.lineplot(data=df3, \n x=\"year\", y=\"countpopadj\", \n hue=\"Country\", 
palette=\"bright\",\n style=\"Country\",\n markers=True, dashes=False) \nplt.xlabel(\" \")\nplt.ylabel(\"Adjusted dosage units per 1000 population\", fontsize= 15)\nplt.legend(loc='upper right', bbox_to_anchor=(1.2, 1))\nplt.savefig('fig2b.png')\nfig2b\n\n\n# In[11]:\n\n\n# Now I drop countries ranked 1-16 and -31 to graph the next 8 countries for sales volumes\ndf4 = df1 \ndf4 = df4.set_index(\"country2\")\ndf4 = df4.drop(\"South Africa\")\ndf4 = df4.drop(\"Ireland\")\ndf4 = df4.drop(\"France\")\ndf4 = df4.drop(\"UK\")\ndf4 = df4.drop(\"Latvia\")\ndf4 = df4.drop(\"Japan\")\ndf4 = df4.drop(\"Croatia\")\ndf4 = df4.drop(\"Poland\")\ndf4 = df4.drop(\"Serbia\")\ndf4 = df4.drop(\"Switzerland\")\ndf4 = df4.drop(\"Estonia\")\ndf4 = df4.drop(\"Netherlands\")\ndf4 = df4.drop(\"Finland\")\ndf4 = df4.drop(\"Romania\")\ndf4 = df4.drop(\"Bulgaria\")\ndf4 = df4.drop(\"Slovakia\")\ndf4 = df4.drop(\"Thailand\")\ndf4 = df4.drop(\"Germany\")\ndf4 = df4.drop(\"Argentina\")\ndf4 = df4.drop(\"Italy\")\ndf4 = df4.drop(\"Portugal\")\ndf4 = df4.drop(\"Brazil\")\ndf4 = df4.drop(\"Spain\")\ndf4.head()\n\n\n# In[12]:\n\n\n# graphing countries ranked 17-25 for mean volume sales of OTC codeine \nfig3 = sns.lineplot(data=df4, \n x=\"year\", y=\"countpop\", \n hue=\"Country\", palette=\"bright\",\n style=\"Country\",\n markers=True, dashes=False) \nplt.xlabel(\" \")\nplt.ylabel(\"Adjusted dosage units per 1000 population\", fontsize= 15)\nplt.legend(loc='upper right', bbox_to_anchor=(1.2, 1))\nplt.savefig('fig3.png')\nfig3\n\n\n# In[13]:\n\n\n# Now I drop countries for the last 8\ndf5 = df1 \ndf5 = df5.set_index(\"country2\")\ndf5 = df5.drop(\"South Africa\")\ndf5 = df5.drop(\"Ireland\")\ndf5 = df5.drop(\"France\")\ndf5 = df5.drop(\"UK\")\ndf5 = df5.drop(\"Latvia\")\ndf5 = df5.drop(\"Japan\")\ndf5 = df5.drop(\"Croatia\")\ndf5 = df5.drop(\"Poland\")\ndf5 = df5.drop(\"Serbia\")\ndf5 = df5.drop(\"Switzerland\")\ndf5 = df5.drop(\"Estonia\")\ndf5 = df5.drop(\"Netherlands\")\ndf5 = df5.drop(\"Finland\")\ndf5 = df5.drop(\"Romania\")\ndf5 = df5.drop(\"Bulgaria\")\ndf5 = df5.drop(\"Slovakia\")\ndf5 = df5.drop(\"Slovenia\")\ndf5 = df5.drop(\"Lithuania\")\ndf5 = df5.drop(\"Belgium\")\ndf5 = df5.drop(\"Mexico\")\ndf5 = df5.drop(\"Russia\")\ndf5 = df5.drop(\"Canada\")\ndf5 = df5.drop(\"USA\")\ndf5 = df5.drop(\"Greece\")\ndf5.head()\n\n\n# In[14]:\n\n\n# graphing countries ranked 9-16 for mean volume sales of OTC codeine \nfig4 = sns.lineplot(data=df5, \n x=\"year\", y=\"countpop\", \n hue=\"Country\", palette=\"bright\",\n style=\"Country\",\n markers=True, dashes=False) \nplt.xlabel(\" \")\nplt.ylabel(\"Adjusted doseage units per 1000 population\", fontsize= 15)\nplt.legend(loc='upper right', bbox_to_anchor=(1.2, 1))\nplt.savefig('fig4.png')\nfig4\n\n\n# # Public expenditure \n\n# In[15]:\n\n\n# this graphs the public expenditure for all 31 countries - adjusted with IQVIA coverage\nfig5 = sns.lineplot(data=df1, \n x='year', y='costpopadj', \n hue='Country',\n palette=\"bright\",\n marker=\"o\") \nplt.xlabel(\" \")\nplt.ylabel(\"Adjusted public expenditure (£) per 1,000 population\", fontsize= 12)\nplt.legend(loc='upper right', bbox_to_anchor=(1.2, 1))\nplt.savefig('fig5.png')\nfig5\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
]
] |
marcdemers/pytorch_geometric_temporal | [
"2c99d690cf183e6c9e7ff40d15ba2f8b875c1aaf"
] | [
"torch_geometric_temporal/nn/recurrent/gconv_gru.py"
] | [
"import torch\nfrom torch_geometric.nn import ChebConv\n\n\nclass GConvGRU(torch.nn.Module):\n r\"\"\"An implementation of the Chebyshev Graph Convolutional Gated Recurrent Unit\n Cell. For details see this paper: `\"Structured Sequence Modeling with Graph\n Convolutional Recurrent Networks.\" <https://arxiv.org/abs/1612.07659>`_\n\n Args:\n in_channels (int): Number of input features.\n out_channels (int): Number of output features.\n K (int): Chebyshev filter size :math:`K`.\n normalization (str, optional): The normalization scheme for the graph\n Laplacian (default: :obj:`\"sym\"`):\n\n 1. :obj:`None`: No normalization\n :math:`\\mathbf{L} = \\mathbf{D} - \\mathbf{A}`\n\n 2. :obj:`\"sym\"`: Symmetric normalization\n :math:`\\mathbf{L} = \\mathbf{I} - \\mathbf{D}^{-1/2} \\mathbf{A}\n \\mathbf{D}^{-1/2}`\n\n 3. :obj:`\"rw\"`: Random-walk normalization\n :math:`\\mathbf{L} = \\mathbf{I} - \\mathbf{D}^{-1} \\mathbf{A}`\n\n You need to pass :obj:`lambda_max` to the :meth:`forward` method of\n this operator in case the normalization is non-symmetric.\n :obj:`\\lambda_max` should be a :class:`torch.Tensor` of size\n :obj:`[num_graphs]` in a mini-batch scenario and a\n scalar/zero-dimensional tensor when operating on single graphs.\n You can pre-compute :obj:`lambda_max` via the\n :class:`torch_geometric.transforms.LaplacianLambdaMax` transform.\n bias (bool, optional): If set to :obj:`False`, the layer will not learn\n an additive bias. (default: :obj:`True`)\n \"\"\"\n def __init__(self, in_channels: int, out_channels: int, K: int,\n normalization: str=\"sym\", bias: bool=True):\n super(GConvGRU, self).__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.K = K\n self.normalization = normalization\n self.bias = bias\n self._create_parameters_and_layers()\n\n\n def _create_update_gate_parameters_and_layers(self):\n\n self.conv_x_z = ChebConv(in_channels=self.in_channels,\n out_channels=self.out_channels,\n K=self.K,\n normalization=self.normalization,\n bias=self.bias)\n\n self.conv_h_z = ChebConv(in_channels=self.out_channels,\n out_channels=self.out_channels,\n K=self.K,\n normalization=self.normalization,\n bias=self.bias)\n\n\n def _create_reset_gate_parameters_and_layers(self):\n\n self.conv_x_r = ChebConv(in_channels=self.in_channels,\n out_channels=self.out_channels,\n K=self.K,\n normalization=self.normalization,\n bias=self.bias)\n\n self.conv_h_r = ChebConv(in_channels=self.out_channels,\n out_channels=self.out_channels,\n K=self.K,\n normalization=self.normalization,\n bias=self.bias)\n\n\n def _create_candidate_state_parameters_and_layers(self):\n\n self.conv_x_h = ChebConv(in_channels=self.in_channels,\n out_channels=self.out_channels,\n K=self.K,\n normalization=self.normalization,\n bias=self.bias)\n\n self.conv_h_h = ChebConv(in_channels=self.out_channels,\n out_channels=self.out_channels,\n K=self.K,\n normalization=self.normalization,\n bias=self.bias)\n\n\n def _create_parameters_and_layers(self):\n self._create_update_gate_parameters_and_layers()\n self._create_reset_gate_parameters_and_layers()\n self._create_candidate_state_parameters_and_layers()\n\n\n def _set_hidden_state(self, X, H):\n if H is None:\n H = torch.zeros(X.shape[0], self.out_channels)\n return H\n\n\n def _calculate_update_gate(self, X, edge_index, edge_weight, H):\n Z = self.conv_x_z(X, edge_index, edge_weight)\n Z = Z + self.conv_h_z(H, edge_index, edge_weight)\n Z = torch.sigmoid(Z)\n return Z\n\n\n def _calculate_reset_gate(self, X, edge_index, edge_weight, H):\n R = 
self.conv_x_r(X, edge_index, edge_weight)\n R = R + self.conv_h_r(H, edge_index, edge_weight)\n R = torch.sigmoid(R) \n return R\n\n\n def _calculate_candidate_state(self, X, edge_index, edge_weight, H, R):\n H_tilde = self.conv_x_h(X, edge_index, edge_weight)\n H_tilde = H_tilde + self.conv_h_h(H*R, edge_index, edge_weight)\n H_tilde = torch.tanh(H_tilde)\n return H_tilde\n\n\n def _calculate_hidden_state(self, Z, H, H_tilde):\n H = Z*H + (1-Z)*H_tilde\n return H\n\n\n def forward(self, X: torch.FloatTensor, edge_index: torch.LongTensor,\n edge_weight: torch.FloatTensor=None, H: torch.FloatTensor=None) -> torch.FloatTensor:\n \"\"\"\n Making a forward pass. If edge weights are not present the forward pass\n defaults to an unweighted graph. If the hidden state matrix is not present\n when the forward pass is called it is initialized with zeros.\n\n Arg types:\n * **X** *(PyTorch Float Tensor)* - Node features.\n * **edge_index** *(PyTorch Long Tensor)* - Graph edge indices.\n * **edge_weight** *(PyTorch Long Tensor, optional)* - Edge weight vector.\n * **H** *(PyTorch Float Tensor, optional)* - Hidden state matrix for all nodes.\n\n Return types:\n * **H** *(PyTorch Float Tensor)* - Hidden state matrix for all nodes.\n \"\"\"\n H = self._set_hidden_state(X, H)\n Z = self._calculate_update_gate(X, edge_index, edge_weight, H)\n R = self._calculate_reset_gate(X, edge_index, edge_weight, H)\n H_tilde = self._calculate_candidate_state(X, edge_index, edge_weight, H, R)\n H = self._calculate_hidden_state(Z, H, H_tilde)\n return H\n"
] | [
[
"torch.zeros",
"torch.sigmoid",
"torch.tanh"
]
] |
Exorust/Discrete-Event-Simulation | [
"1d5d43c88521db7c0e010966f6df685256492d71"
] | [
"Process Generation Script.py"
] | [
"import numpy as np\n# exp_dist <-> f(x, beta) = (1/beta) * exp(-(1/beta) * x)\nbeta = 10\nprocess_count = 20\na=np.round(np.random.exponential(scale=beta, size=(process_count,2)))\nnp.savetxt(\"process.csv\", a, delimiter=\",\",fmt=\"%i\")\n\n# Generates Arrival time and burst time\n"
] | [
[
"numpy.savetxt",
"numpy.random.exponential"
]
] |
waterahr/HR-Net | [
"52f8d9d8837fca1307aff4df4ed676cab2bb296a"
] | [
"src/network/hiarBayesGoogLenet_gap_v4.py"
] | [
"import sys\nsys.path.append(\"..\")\nimport os\nfrom keras.models import Model\nfrom keras.layers import Activation, Input, Flatten, Dense, Dropout, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, Lambda\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D, AveragePooling2D\nfrom keras.layers.recurrent import LSTM\nfrom keras.utils import plot_model\n#from spp.spp.SpatialPyramidPooling import SpatialPyramidPooling\nimport keras.backend as K\nimport numpy as np\nfrom keras.models import Sequential\n\n\n\nclass hiarBayesGoogLeNet:\n @staticmethod\n def Conv2d_BN(x, nb_filter, kernel_size, padding='same', strides=(1,1), name=None):\n if name is not None:\n bn_name = name + '_bn'\n conv_name = name\n else:\n bn_name = None\n conv_name = None\n \n x = Conv2D(nb_filter, kernel_size, padding=padding, strides=strides, activation='relu', name=conv_name)(x)\n x = BatchNormalization(axis=3, name=bn_name)(x)\n return x\n \n @staticmethod\n def Inception(x, nb_filter, name=None):\n \"\"\"\n branch1x1 = hiarBayesGoogLeNet.Conv2d_BN(x, nb_filter, (1,1), padding='same', strides=(1,1), name=name)\n \n branch3x3 = hiarBayesGoogLeNet.Conv2d_BN(x, nb_filter, (1,1), padding='same', strides=(1,1), name=name)\n branch3x3 = hiarBayesGoogLeNet.Conv2d_BN(branch3x3, nb_filter,(3,3), padding='same', strides=(1,1), name=name)\n \n branch5x5 = hiarBayesGoogLeNet.Conv2d_BN(x, nb_filter, (1,1), padding='same', strides=(1,1),name=name)\n branch5x5 = hiarBayesGoogLeNet.Conv2d_BN(branch5x5, nb_filter, (5,5), padding='same', strides=(1,1), name=name)\n \n branchpool = MaxPooling2D(pool_size=(3,3), strides=(1,1), padding='same')(x)\n branchpool = hiarBayesGoogLeNet.Conv2d_BN(branchpool, nb_filter, (1,1), padding='same', strides=(1,1), name=name)\n \"\"\"\n branch1x1 = hiarBayesGoogLeNet.Conv2d_BN(x, nb_filter[0], (1,1), padding='same', strides=(1,1), name=name+'_1x1')\n \n branch3x3 = hiarBayesGoogLeNet.Conv2d_BN(x, nb_filter[1], (1,1), padding='same', strides=(1,1), name=name+'_3x3_reduce')\n branch3x3 = hiarBayesGoogLeNet.Conv2d_BN(branch3x3, nb_filter[2],(3,3), padding='same', strides=(1,1), name=name+'_3x3')\n \n branch5x5 = hiarBayesGoogLeNet.Conv2d_BN(x, nb_filter[3], (1,1), padding='same', strides=(1,1),name=name+'5x5_reduce')\n branch5x5 = hiarBayesGoogLeNet.Conv2d_BN(branch5x5, nb_filter[4], (5,5), padding='same', strides=(1,1), name=name+'_5x5')\n \n branchpool = MaxPooling2D(pool_size=(3,3), strides=(1,1), padding='same')(x)\n branchpool = hiarBayesGoogLeNet.Conv2d_BN(branchpool, nb_filter[5], (1,1), padding='same', strides=(1,1), name=name+'_pool_proj')\n \n x = concatenate([branch1x1, branch3x3, branch5x5, branchpool], axis=3)\n \n return x\n\n \"\"\"\n @staticmethod\n def SPP(x, pooling_regions):\n dim_ordering = K.image_dim_ordering()\n assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'\n if dim_ordering == 'th':\n input_shape = (num_channels, None, None)\n elif dim_ordering == 'tf':\n input_shape = (None, None, num_channels)\n model = Sequential()\n model.add(SpatialPyramidPooling(pooling_regions, input_shape=input_shape))\n \n return model.predict(x)\n \"\"\"\n \n\n @staticmethod\n def build(width, height, depth, classes, pooling_regions = [1, 3], weights=\"imagenet\"):\n assert(isinstance(classes, list), 'Must be list type.')\n assert(len(classes) == 3, 'Must be 3 elements in the list.')\n inpt = Input(shape=(width, height, depth))\n #padding = 'same',填充为(步长-1)/2,还可以用ZeroPadding2D((3,3))\n x = hiarBayesGoogLeNet.Conv2d_BN(inpt, 
64, (7,7), strides=(2,2), padding='same', name=\"conv1_7x7_s2\")\n x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)\n x = hiarBayesGoogLeNet.Conv2d_BN(x, 192, (3,3), strides=(1,1), padding='same', name=\"conv2_3x3\")\n x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)\n \"\"\"\n x = hiarBayesGoogLeNet.Inception(x, 64, name=\"inception_3a\")#256\n x = hiarBayesGoogLeNet.Inception(x, 120, name=\"inception_3b\")#480\n \"\"\"\n x = hiarBayesGoogLeNet.Inception(x, [64,96,128,16,32,32], name=\"inception_3a\")#256\n x = hiarBayesGoogLeNet.Inception(x, [128,128,192,32,96,64], name=\"inception_3b\")#480\n x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)\n \"\"\"\n x = hiarBayesGoogLeNet.Inception(x, 128, name=\"inception_4a\")#512\n x = hiarBayesGoogLeNet.Inception(x, 128, name=\"inception_4b\")\n x = hiarBayesGoogLeNet.Inception(x, 128, name=\"inception_4c\")\n x = hiarBayesGoogLeNet.Inception(x, 132, name=\"inception_4d\")#528\n x = hiarBayesGoogLeNet.Inception(x, 208, name=\"inception_4e\")#832\n \"\"\"\n x = hiarBayesGoogLeNet.Inception(x, [192,96,208,16,48,64], name=\"inception_4a\")#512\n fea_low = x\n #fea_low = Conv2D(512, (3, 3), padding='same', activation='relu', name='conv1_e')(x)\n #fea_low = GlobalAveragePooling2D()(x)#, name=\"gap_low\"\n #fea_low = Dense(512, activation='relu')(fea_low)\n x = hiarBayesGoogLeNet.Inception(x, [160,112,224,24,64,64], name=\"inception_4b\")\n x = hiarBayesGoogLeNet.Inception(x, [128,128,256,24,64,64], name=\"inception_4c\")\n x = hiarBayesGoogLeNet.Inception(x, [112,144,288,32,64,64], name=\"inception_4d\")#528\n fea_mid = x\n #fea_mid = Conv2D(512, (3, 3), padding='same', activation='relu', name='conv2_e')(x)\n #fea_mid = GlobalAveragePooling2D()(x)#, name=\"gap_mid\"\n #fea_mid = Dense(512, activation='relu')(fea_mid)\n x = hiarBayesGoogLeNet.Inception(x, [256,160,320,32,128,128], name=\"inception_4e\")#832\n x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)\n \"\"\"\n x = hiarBayesGoogLeNet.Inception(x, 208, name=\"inception_5a\")\n x = hiarBayesGoogLeNet.Inception(x, 256, name=\"inception_5b\")#1024\n \"\"\"\n x = hiarBayesGoogLeNet.Inception(x, [256,160,320,32,128,128], name=\"inception_5a\")\n x = hiarBayesGoogLeNet.Inception(x, [384,192,384,48,128,128], name=\"inception_5b\")#1024\n fea_hig = x\n #fea_hig = Conv2D(1024, (3, 3), padding='same', activation='relu', name='conv3_e')(x)\n #fea_hig = GlobalAveragePooling2D()(x)#, name=\"gap_hig\"\n #fea_hig = Dense(1024, activation='relu')(fea_hig)\n \"\"\"\n predictions_low = Dense(classes[0], name=\"low\", activation=\"sigmoid\")(fea_low)#\n predictions_mid_hs = Dense(classes[1], name=\"middle_hs\", activation=\"sigmoid\")(fea_mid)#\n predictions_mid_ub = Dense(classes[2], name=\"middle_ub\", activation=\"sigmoid\")(fea_mid)#\n predictions_mid_lb = Dense(classes[3], name=\"middle_lb\", activation=\"sigmoid\")(fea_mid)#\n predictions_mid_sh = Dense(classes[4], name=\"middle_sh\", activation=\"sigmoid\")(fea_mid)#\n predictions_mid_at = Dense(classes[5], name=\"middle_at\", activation=\"sigmoid\")(fea_mid)#\n predictions_mid_ot = Dense(classes[6], name=\"middle_ot\", activation=\"sigmoid\")(fea_mid)#\n predictions_hig = Dense(classes[7], name=\"high_fea\", activation=\"sigmoid\")(fea_hig)#\n \"\"\"\n fea_low = Conv2D(512, (3, 3), padding='same', activation='relu')(fea_low)\n #fea_low = Flatten()(fea_low)\n #fea_low = Dense(512, activation='relu')(fea_low)\n fea_low = GlobalAveragePooling2D()(fea_low)\n predictions_low = 
Dense(classes[0], name=\"low\", activation=\"sigmoid\")(fea_low)#\n fea_mid_ub = Conv2D(512, (3, 3), padding='same', activation='relu')(fea_mid)\n #fea_mid_ub = Flatten()(fea_mid_ub)\n #fea_mid_ub = Dense(512, activation='relu')(fea_mid_ub)\n fea_mid_ub = GlobalAveragePooling2D()(fea_mid_ub)\n predictions_mid_ub = Dense(classes[1], name=\"middle_ub\", activation=\"sigmoid\")(fea_mid_ub)#\n fea_mid_lb = Conv2D(512, (3, 3), padding='same', activation='relu')(fea_mid)\n #fea_mid_lb = Flatten()(fea_mid_lb)\n #fea_mid_lb = Dense(512, activation='relu')(fea_mid_lb)\n fea_mid_lb = GlobalAveragePooling2D()(fea_mid_lb)\n predictions_mid_lb = Dense(classes[2], name=\"middle_lb\", activation=\"sigmoid\")(fea_mid_lb)#\n fea_mid_sh = Conv2D(512, (3, 3), padding='same', activation='relu')(fea_mid)\n #fea_mid_sh = Flatten()(fea_mid_sh)\n #fea_mid_sh = Dense(512, activation='relu')(fea_mid_sh)\n fea_mid_sh = GlobalAveragePooling2D()(fea_mid_sh)\n predictions_mid_sh = Dense(classes[3], name=\"middle_sh\", activation=\"sigmoid\")(fea_mid_sh)#\n #fea_mid_ot = Flatten()(fea_mid_ot)\n #fea_mid_ot = Dense(512, activation='relu')(fea_mid_ot)\n #fea_mid_ot = GlobalAveragePooling2D()(fea_mid_ot)\n #predictions_mid_ot = Dense(classes[6], name=\"middle_ot\", activation=\"sigmoid\")(fea_mid_ot)#\n fea_hig = Conv2D(1024, (3, 3), padding='same', activation='relu')(fea_hig)\n #fea_hig = Flatten()(fea_hig)\n #fea_hig = Dense(512, activation='relu')(fea_hig)\n fea_hig = GlobalAveragePooling2D()(fea_hig)\n predictions_hig = Dense(classes[4], name=\"high_fea\", activation=\"sigmoid\")(fea_hig)\n #\"\"\"\n \"\"\"PCM2018\"\"\"\n #predictions_hig = Dense(classes[2], activation=\"sigmoid\", name=\"high\")(concatenate([fea_low, fea_mid, fea_hig], axis=1))\n \"\"\"PCM2018\"\"\"\n predictions_priori = concatenate([predictions_low, predictions_mid_ub, predictions_mid_lb, predictions_mid_sh], axis=1)\n \"\"\"mar\"\"\"\n #val = np.load(\"../results/state_transition_matrix.npy\")\n #state_transition_matrix = K.variable(value=val, dtype='float32', name='state_transition_matrix')\n #predictions_hig_cond = Lambda(lambda x:K.dot(x, state_transition_matrix), name=\"high_cond\")(predictions_priori)\n \"\"\"mar\"\"\"\n predictions_hig_cond = Dense(classes[4], activation=\"sigmoid\", name=\"high_cond\")(predictions_priori)\n #predictions_priori = K.reshape(concatenate([predictions_low, predictions_mid], axis=1), (-1, classes[0]+classes[1], 1))\n #predictions_hig_cond = LSTM(classes[2], activation=\"sigmoid\", name=\"high_cond\")(predictions_priori)\n predictions_hig_posterior = Lambda(lambda x:x[1] * x[0], name=\"high\")([predictions_hig_cond, predictions_hig])\n #predictions_hig_posterior = Lambda(lambda x:K.sigmoid(K.tanh((x[1] - 0.5) * np.pi) * x[0]), name=\"high\")([predictions_hig_cond, predictions_hig])\n #multi#Lambda(lambda x:x[0] * x[1], name=\"high_post\")([predictions_hig_cond, predictions_hig])\n #cond#Dense(classes[2], activation=\"sigmoid\", name=\"high_post\")(concatenate([predictions_hig, predictions_hig_cond], axis=1))\n #add#Lambda(lambda x:(x[0] + x[1])/2, name=\"high_post\")([predictions_hig_cond, predictions_hig])\n \"\"\"\"mar\"\"\"\n #predictions_low = Activation(\"sigmoid\")(predictions_low)\n #predictions_mid = Activation(\"sigmoid\")(predictions_mid)\n #predictions_hig_posterior = Activation(\"sigmoid\")(predictions_hig_posterior)\n \"\"\"mar\"\"\"\n #predictions = concatenate([predictions_low, predictions_mid, predictions_hig_posterior], axis=1)\n \"\"\"PCM2018\"\"\"\n #predictions = concatenate([predictions_low, 
predictions_mid, predictions_hig], axis=1)\n \"\"\"PCM2018\"\"\"\n \"\"\"\n predictions_low = Dense(classes[0], activation=\"sigmoid\", name=\"low\")(fea_low)\n predictions_mid_fea = Dense(classes[1], activation=\"sigmoid\", name=\"middle_fea\")(fea_mid)\n predictions_mid_cond = Dense(classes[1], activation=\"sigmoid\", name=\"middle_cond\")(predictions_low)\n predictions_mid = Lambda(lambda x:(x[0] + x[1])/2, name=\"mid\")([predictions_mid_fea, predictions_mid_cond])\n predictions_hig_fea = Dense(classes[2], activation=\"sigmoid\", name=\"high_fea\")(fea_hig)\n predictions_priori = concatenate([predictions_low, predictions_mid], axis=1)\n predictions_hig_cond = Dense(classes[2], activation=\"sigmoid\", name=\"high_cond\")(predictions_priori)\n predictions_hig = Lambda(lambda x:(x[0] + x[1])/2, name=\"high_post\")([predictions_hig_cond, predictions_hig_fea])\n predictions = concatenate([predictions_low, predictions_mid, predictions_hig], axis=1)\n \"\"\"\n \"\"\"\n x = concatenate([spp_low, spp_mid, spp_hig], axis=1)#2048\n #x = AveragePooling2D(pool_size=(7,7), strides=(7,7), padding='same')(x)\n x = Dropout(0.4)(x)\n x = Dense(2048, activation='relu')(x)\n x = Dense(classes, activation='softmax')(x)\n \"\"\"\n # create the model\n model = Model(inpt, [predictions_low, predictions_mid_ub, predictions_mid_lb, predictions_mid_sh, predictions_hig_posterior], name='inception')\n if weights == \"imagenet\":\n weights = np.load(\"../results/googlenet_weights.npy\", encoding='latin1').item()\n for layer in model.layers:\n if layer.get_weights() == []:\n continue\n #weight = layer.get_weights()\n if layer.name in weights:\n #print(layer.name, end=':')\n #print(layer.get_weights()[0].shape == weights[layer.name]['weights'].shape, end=' ')\n #print(layer.get_weights()[1].shape == weights[layer.name]['biases'].shape)\n layer.set_weights([weights[layer.name]['weights'], weights[layer.name]['biases']])\n # return the constructed network architecture\n return model\n\nif __name__ == \"__main__\":\n os.environ['CUDA_VISIBLE_DEVICES'] = \"\"\n model = hiarBayesGoogLeNet.build(160, 75, 3, [10, 20, 30])#因为googleNet默认输入32*32的图片\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n model.summary()\n plot_model(model, to_file=\"../../results/models/hiarBayesGoogleLenet.png\", show_shapes=True)"
] | [
[
"numpy.load"
]
] |
rabaneda/S1chain | [
"b2c0c2efc6b8b09c92f66d5e10074f3c1df04e03"
] | [
"source/winddir.py"
] | [
"\"\"\"\nCreated on Wed May 20 15:52:47 2020\n\n@author: Alberto\n\"\"\"\n\nimport numpy as np\nimport scipy.stats as st\nfrom nc_methods import NetCDFManager\nimport warnings\n\n#------------------------------------------------------------------------------\n\nclass WIND(NetCDFManager):\n \n kernels = {'op_sobel':np.array([[3,0,3], [10,0,-10], [3,0,-3]])*(1/32.)}\n ampli = 'Amplitude_VV'\n sigma = 'Sigma0_VV'\n incidence = 'incidenceAngleFromEllipsoid' # in degrees\n \n wspd_name = 'Wind speed'\n wspd_attr = {'Long_name': 'Neutral wind speed at 10 metres over the still water level', \n 'Standard_name':'Neutral wind speed at 10 m',\n 'units':'m/s',\n 'resolution':'1 km',\n 'scale_factor':10}\n \n sigma_calc_name = 'NRCS calc'\n sigma_calc_attr = {'Long_name': 'Calculated Normalised Radar Cross Section', \n 'Standard_name':'Sigma nought calculated',\n 'units':'m/s',\n 'resolution':'100 m',\n 'scale_factor':1}\n \n sigma_obs_name = 'NRCS obs'\n sigma_obs_attr = {'Long_name': 'Observed Normalised Radar Cross Section', \n 'Standard_name':'Sigma nought observed',\n 'units':'m/s',\n 'resolution':'100 m',\n 'scale_factor':1}\n \n wdir_name = 'Wind direction'\n wdir_attr = {'Long_name': 'Wind direction with 180 degrees of ambiguity', \n 'Standard_name':'Wind direction',\n 'units':'degrees',\n 'resolution':'4 km',\n 'scale_factor':40}\n \n R_name = 'Alignment'\n R_attr = {'Long_name': 'Mean Resultant Length', \n 'Standard_name':'Alignment',\n 'units':'none',\n 'resolution':'4 km',\n 'scale_factor':40}\n \n ME_name = 'Marginal error'\n ME_attr = {'Long_name': 'Marginal Error of the Mean Resultant Vector', \n 'Standard_name':'Marginal Error',\n 'units':'degrees',\n 'resolution':'4 km',\n 'scale_factor':40}\n \n \n def get_phase_matrix(self, amplitude_matrix):\n \n real = self.convolution_fourier(amplitude_matrix, WIND.kernels['op_sobel'])\n img = self.convolution_fourier(amplitude_matrix, np.transpose(WIND.kernels['op_sobel']))\n lg = real + (img*1j)\n phases = np.angle(lg)\n return phases\n \n def get_direction(self, arr, confidence):\n '''Returns mean direction, mean resultant vector and marginal error'''\n '''array must be contain axial directional data'''\n \n array = arr.flatten()\n angle = np.arctan2(np.mean(np.sin(array)), np.mean(np.cos(array)))*0.5\n R = np.power((np.mean(np.cos(array))**2)+(np.mean(np.sin(array))**2), 0.5)\n print (array.shape)\n print (np.mean(array)*confidence,np.mean(array)*(1-confidence))\n med = st.scoreatpercentile(array, 50, limit=(np.mean(array)*confidence,np.mean(array)*(1-confidence)))\n alpha = np.mean(np.cos(4*(array-angle)))\n ME = 0.5*(np.arcsin(med*np.power((1-alpha)/(2*len(array)*(R**2)), 0.5)))\n return (np.degrees(angle), R, ME)\n \n def get_direction_matrix(self, confidence=0.05, threshold=15, progressive_multilook=False):\n '''Returns mean direction array, mean resultant vector array and marginal error array'''\n ''' confidence: int, 0 to 1. Percintile to remove from its freq. distribution\n Default 0.05, which means it will remove values within distribution\n from 0 to 0.05 anf from 0.95 to 1.\n thershold: int, in degrees. 
Maximum marginal error in degrees to accept.\n Default is 15 degrees.\n progessive multilook: Boolean, default is False, each imagette is\n independent of the others, pixels belong to only one imagette.\n If True, pixels will belong to multiple imagettes at the same time\n since imagattes will overlap because an imagette is created for each \n pixel independently of multilook value; N of pixels = N of imagettes.\n Each pixel will belong to multiple imagettes, but there will be\n one imagette where this pixel will be the centre of the imagette'''\n \n axial = 2*self.get_phase_matrix(self.get_var_array(self.ds, WIND.ampli))\n angle_matrix = np.zeros(shape=axial.shape)\n R_matrix = np.zeros(shape=axial.shape)\n ME_matrix = np.zeros(shape=axial.shape)\n \n subimages = list(self.gen_imagettes(axial, multilook=WIND.wdir_attr['scale_factor'], progressive_multilook=progressive_multilook))\n for roi in subimages:\n angle, R, ME = self.get_direction(roi[0], confidence=confidence)\n if ME > threshold:\n angle = np.nan\n \n if len(roi[1]) == 2:\n angle_matrix[roi[1][0],roi[1][1]] = angle\n R_matrix[roi[1][0], roi[1][1]] = R\n ME_matrix[roi[1][0], roi[1][1]] = ME\n elif len(roi[1]) == 4:\n angle_matrix[roi[1][0]:roi[1][1], roi[1][2]:roi[1][3]] = angle\n R_matrix[roi[1][0]:roi[1][1], roi[1][2]:roi[1][3]] = R\n ME_matrix[roi[1][0]:roi[1][1], roi[1][2]:roi[1][3]] = ME\n \n \n self.add_var(self.out, WIND.wdir_name, angle_matrix, WIND.wdir_attr)\n if self.inter == True:\n self.add_var(self.out, WIND.R_name, R_matrix, WIND.R_attr)\n self.add_var(self.out, WIND.ME_name, ME_matrix, WIND.ME_attr)\n\n \n def cmod5n_forward(self,v,phi,theta):\n '''! ---------\n ! cmod5n_forward(v, phi, theta)\n ! inputs:\n ! v in [m/s] wind velocity (always >= 0)\n ! phi in [deg] angle between azimuth and wind direction\n ! (= D - AZM)\n ! theta in [deg] incidence angle\n ! output:\n ! CMOD5_N NORMALIZED BACKSCATTER (LINEAR)\n !\n ! All inputs must be Numpy arrays of equal sizes\n !\n ! A. STOFFELEN MAY 1991 ECMWF CMOD4\n ! A. STOFFELEN, S. DE HAAN DEC 2001 KNMI CMOD5 PROTOTYPE\n ! H. HERSBACH JUNE 2002 ECMWF COMPLETE REVISION\n ! J. de Kloe JULI 2003 KNMI, rewritten in fortan90\n ! A. Verhoef JAN 2008 KNMI, CMOD5 for neutral winds\n ! K.F.Dagestad OCT 2011 NERSC, Vectorized Python version\n !---------------------------------------------------------------------\n '''\n # Ignore overflow errors for wind calculations over land\n warnings.simplefilter(\"ignore\", RuntimeWarning) \n \n DTOR = 57.29577951\n THETM = 40.\n THETHR = 25.\n ZPOW = 1.6\n \n # NB: 0 added as first element below, to avoid switching from 1-indexing to 0-indexing\n C = [0, -0.6878, -0.7957, 0.3380, -0.1728, 0.0000, 0.0040, 0.1103, 0.0159, \n 6.7329, 2.7713, -2.2885, 0.4971, -0.7250, 0.0450, \n 0.0066, 0.3222, 0.0120, 22.7000, 2.0813, 3.0000, 8.3659,\n -3.3428, 1.3236, 6.2437, 2.3893, 0.3249, 4.1590, 1.6930]\n Y0 = C[19]\n PN = C[20]\n A = C[19]-(C[19]-1)/C[20]\n \n B = 1./(C[20]*(C[19]-1.)**(3-1))\n \n # ! ANGLES\n FI=phi/DTOR\n CSFI = np.cos(FI)\n CS2FI= 2.00 * CSFI * CSFI - 1.00\n \n X = (theta - THETM) / THETHR\n XX = X*X\n \n # ! B0: FUNCTION OF WIND SPEED AND INCIDENCE ANGLE\n A0 =C[ 1]+C[ 2]*X+C[ 3]*XX+C[ 4]*X*XX\n A1 =C[ 5]+C[ 6]*X\n A2 =C[ 7]+C[ 8]*X\n \n GAM=C[ 9]+C[10]*X+C[11]*XX\n S0 =C[12]+C[13]*X\n \n # V is missing! 
Using V=v as substitute, this is apparently correct\n V=v\n S = A2*V\n S_vec = S.copy() \n SlS0 = [S_vec<S0]\n S_vec[SlS0]=S0[SlS0]\n A3=1./(1.+np.exp(-S_vec))\n SlS0 = (S<S0)\n A3[SlS0]=A3[SlS0]*(S[SlS0]/S0[SlS0])**( S0[SlS0]*(1.- A3[SlS0]))\n #A3=A3*(S/S0)**( S0*(1.- A3))\n B0=(A3**GAM)*10.**(A0+A1*V)\n \n # ! B1: FUNCTION OF WIND SPEED AND INCIDENCE ANGLE\n B1 = C[15]*V*(0.5+X-np.tanh(4.*(X+C[16]+C[17]*V)))\n B1 = C[14]*(1.+X)- B1\n B1 = B1/(np.exp( 0.34*(V-C[18]) )+1.)\n \n # ! B2: FUNCTION OF WIND SPEED AND INCIDENCE ANGLE\n V0 = C[21] + C[22]*X + C[23]*XX\n D1 = C[24] + C[25]*X + C[26]*XX\n D2 = C[27] + C[28]*X\n \n V2 = (V/V0+1.)\n V2ltY0 = V2<Y0\n V2[V2ltY0] = A+B*(V2[V2ltY0]-1.)**PN\n B2 = (-D1+D2*V2)*np.exp(-V2)\n \n # ! CMOD5_N: COMBINE THE THREE FOURIER TERMS\n CMOD5_N = B0*(1.0+B1*CSFI+B2*CS2FI)**ZPOW\n return CMOD5_N\n \n \n def cmod5n_inverse(self, sigma0_obs, phi, incidence, iterations=10):\n '''! ---------\n ! cmod5n_inverse(sigma0_obs, phi, incidence, iterations)\n ! inputs:\n ! sigma0_obs Normalized Radar Cross Section [linear units]\n ! phi in [deg] angle between azimuth and wind direction\n ! (= D - AZM)\n ! incidence in [deg] incidence angle\n ! iterations: number of iterations to run\n ! output:\n ! Wind speed, 10 m, neutral stratification \n !\n ! All inputs must be Numpy arrays of equal sizes\n !\n ! This function iterates the forward CMOD5N function\n ! until agreement with input (observed) sigma0 values \n !---------------------------------------------------------------------\n '''\n # Ignore overflow errors for wind calculations over land\n warnings.simplefilter(\"ignore\", RuntimeWarning) \n # First guess wind speed\n V = np.array([10.])*np.ones(sigma0_obs.shape);\n step=10.\n \n # Iterating until error is smaller than threshold\n for iterno in range(1, iterations):\n #print iterno\n sigma0_calc = self.cmod5n_forward(V, phi, incidence)\n ind = sigma0_calc-sigma0_obs>0\n V = V + step\n V[ind] = V[ind] - 2*step \n step = step/2\n \n #mdict={'s0obs':sigma0_obs,'s0calc':sigma0_calc}\n #from scipy.io import savemat\n #savemat('s0test',mdict)\n \n if self.inter == False:\n return (V)\n elif self.inter == True:\n return (V, sigma0_obs, sigma0_calc)\n \n def get_speed_matrix(self):\n \n azimuth = float(self.ds.attrs['azimuth_direction'])\n if self.inter == False:\n speed = self.cmod5n_inverse(self.downsampling_2D(self.get_var_array(self.ds, WIND.sigma), multilook=WIND.wspd_attr['scale_factor']), \n self.downsampling_2D(self.get_var_array(self.out, WIND.wdir_name), multilook=WIND.wspd_attr['scale_factor'])-azimuth, \n self.downsampling_2D(self.get_var_array(self.ds, WIND.incidence)), multilook=WIND.wspd_attr['scale_factor'])\n self.add_var(self.out, WIND.wspd_name, speed, WIND.wspd_attr)\n elif self.inter == True:\n speed, sigma0_obs, sigma0_calc = self.cmod5n_inverse(self.downsampling_2D(self.get_var_array(self.ds, WIND.sigma), multilook=WIND.wspd_attr['scale_factor']), \n self.downsampling_2D(self.get_var_array(self.out, WIND.wdir_name), multilook=WIND.wspd_attr['scale_factor'])-azimuth, \n self.downsampling_2D(self.get_var_array(self.ds, WIND.incidence), multilook=WIND.wspd_attr['scale_factor']))\n self.add_var(self.out, WIND.wspd_name, speed, WIND.wspd_attr)\n self.add_var(self.out, WIND.sigma_obs_name, sigma0_obs, WIND.sigma_obs_attr)\n self.add_var(self.out, WIND.sigma_calc_name, sigma0_calc, WIND.sigma_calc_attr)\n"
] | [
[
"numpy.degrees",
"numpy.ones",
"numpy.transpose",
"numpy.zeros",
"numpy.cos",
"numpy.exp",
"numpy.angle",
"numpy.array",
"numpy.sin",
"numpy.tanh",
"numpy.mean"
]
] |
jsandersen/CMT | [
"1be6e36b9a6042386395bc654c9dd4b579e6ce6d"
] | [
"training/Toxic_CNN2_MCD.py"
] | [
"import tensorflow as tf\ntf.compat.v1.disable_v2_behavior()\n\nfrom src.datasets.toxic import Toxic\nfrom src.datasets.util import splits\nfrom src.models.cnn2 import getCNN2\nfrom src.models.embedding import * \nfrom src.models.predict import predict_mcdropout\nimport yaml\n\nimport pandas as pd\nimport tensorflow as tf\n\nfrom gensim import models\n\n# load conifig\nwith open('config.yaml', 'r') as f:\n conf = yaml.load(f)\nWord2VecModelPath = conf[\"Word2VecModelPath\"]\n\n# config\nRANDOM_STATE = 1\n\nMAX_SEQUENCE_LENGTH = 500\n\nNUM_SPLITS = 5\nSPLIT_SIZE = 10000\n\ndef build():\n\n \n # get data\n print('read data ...')\n \n toxic = Toxic(clean=True)\n X_train, y_train, X_test, y_test, X_eval, y_eval, word_index = toxic.getRawDataSplits(n_splits=5, test_size=SPLIT_SIZE, random_state=1)\n \n print('create embedding ...')\n # embedding\n w = models.KeyedVectors.load_word2vec_format(Word2VecModelPath, binary=True)\n \n embeddings_index, embedding_dim = get_embeddings_index(w)\n \n w = None\n \n # training\n print('train model ...')\n \n models_n = []\n\n for i in range(NUM_SPLITS):\n model = tf.keras.models.load_model(f'models/toxic/CNN2_BL_{i}')\n models_n.append(model)\n\n # predict\n print('evaluate ...')\n dfs = []\n for m in range(NUM_SPLITS):\n dfs_parts = []\n s = 2500\n j = s\n for i in range(0, SPLIT_SIZE, s):\n dfs_n = predict_mcdropout(models_n[m], X_eval[i:j], y_eval[i:j])\n dfs_parts.append(dfs_n)\n print('#', i, j)\n j+=s\n dfs.append(pd.concat([*dfs_parts], ignore_index=True))\n\n # save\n print('save as dataframe ...')\n name = 'CNN2_MCD'\n i = 0\n for df in dfs:\n df.to_pickle(f\"pickle/toxic/df_{name}_{i}.pkl\")\n i = i+1"
] | [
[
"tensorflow.compat.v1.disable_v2_behavior",
"pandas.concat",
"tensorflow.keras.models.load_model"
]
] |
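The evaluation loop in the row above runs `predict_mcdropout` over `X_eval` in fixed slices of 2,500 samples and stitches the per-slice dataframes together, keeping memory bounded during Monte Carlo dropout inference. Below is a generic sketch of that chunked-prediction pattern; `predict_fn` is a stand-in for `predict_mcdropout`, whose internals are not shown in this row.

```python
import pandas as pd

def predict_in_chunks(predict_fn, X, y, chunk_size=2500):
    # predict_fn is assumed to return one DataFrame per slice,
    # as predict_mcdropout does in the script above.
    parts = []
    for start in range(0, len(X), chunk_size):
        stop = start + chunk_size
        parts.append(predict_fn(X[start:stop], y[start:stop]))
    return pd.concat(parts, ignore_index=True)
```

The `ignore_index=True` matters here: each chunk's dataframe restarts its index at zero, so concatenating without it would leave duplicate index labels.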
arti1117/python-machine-learning-pandas-data-analytics | [
"132b0f3326aeb028348bc9e07d38d18e4ec2e18e"
] | [
"PART04/23_matplotlib_pie.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 9 19:22:34 2020\n\n@author: arti\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nplt.style.use('default')\n\ndf = pd.read_csv('./auto-mpg.csv', header=None)\n\ndf.columns = ['mpg', 'cylinders', 'displacement', 'horsepower', 'weight',\n 'acceleration', 'model year', 'origin', 'name']\n\ndf['count'] = 1\ndf_origin = df.groupby('origin').sum()\nprint(df_origin.head())\n\ndf_origin.index = ['USA', 'EU', 'JPN']\n\ndf_origin['count'].plot(kind='pie',\n figsize=(7, 5),\n autopct='%1.1f%%',\n startangle=10,\n colors=['chocolate', 'bisque', 'cadetblue']\n )\n\nplt.title('Model Origin', size=20)\nplt.axis('equal')\nplt.legend(labels=df_origin.index, loc='upper right')\nplt.show()"
] | [
[
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
]
] |
andypbarrett/nsidc-seaice | [
"167a16309f7eaadd5c613b54a7df26eb1f48c2f3"
] | [
"seaice/data/test/test_regression/test_api.py"
] | [
"from unittest.mock import patch\nimport datetime as dt\nimport os\nimport unittest\n\nfrom nose.tools import assert_equals, assert_true, assert_false, assert_raises\nimport numpy as np\nimport numpy.testing as npt\nimport pandas as pd\n\nimport seaice.data as sid\nimport seaice.data.api as api\nimport seaice.data.errors as e\nimport seaice.data.getter as getter\nimport seaice.data.gridset_filters as gf\nimport seaice.nasateam as nt\n\n\nTEST_ROOT = [os.path.join(\n os.path.dirname(__file__),\n os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir,\n 'test_data',\n 'seaice.data'\n)]\n\n\nclass Test_concentration_daily(unittest.TestCase):\n\n def test_concentration_daily(self):\n result = sid.concentration_daily(hemisphere=nt.NORTH, year=2001,\n month=1, day=7, search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_false(np.all(result['data'] == 255.))\n assert_equals(expected, actual)\n\n def test_missing_day_returns_empty_grid(self):\n result = sid.concentration_daily(hemisphere=nt.NORTH, year=2002,\n month=1, day=1, search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_true(np.all(result['data'] == 255.))\n assert_equals(expected, actual)\n\n def test_missing_day_raises_when_asked_to(self):\n assert_raises(e.SeaIceDataNoData, sid.concentration_daily,\n hemisphere=nt.NORTH, year=2002,\n month=1, day=1, search_paths=TEST_ROOT,\n allow_empty_gridset=False)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n @patch('seaice.data.locator.daily_file_path')\n def test_with_bad_date_and_empty_gridset_not_allowed(self,\n mock_daily_file_path,\n mock_get_bad_days_for_hemisphere,\n mock__concentration_gridset_by_filelist):\n files = ['doesnt_matter1.bin',\n 'doesnt_matter2.bin'\n 'doesnt_matter3.bin']\n mock_daily_file_path.return_value = files\n shape = (5, 5, 2)\n missing = 255\n mock__concentration_gridset_by_filelist.return_value = {\n 'data': np.full(shape, missing, dtype=np.int),\n 'metadata': {\n 'period_index': pd.period_range('1980-10-21', '1980-10-23', freq='D'),\n 'missing_value': 255,\n 'files': files\n }\n }\n\n bad_dates = pd.period_range('1980-10-20', '1980-10-27', freq='D')\n mock_get_bad_days_for_hemisphere.return_value = bad_dates\n\n with self.assertRaises(e.SeaIceDataNoData):\n sid.concentration_daily(nt.NORTH,\n 1980, 10, 25,\n ['/who/cares'],\n interpolation_radius=0,\n allow_empty_gridset=False,\n allow_bad_dates=False)\n\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n @patch('seaice.data.gridset_filters._interpolate_missing')\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n def test_daily_multiple_files_interpolated(self, mock_daily_file_path,\n _mockgridset_by_filelist, mock__interpolate_missing,\n mock_get_bad_days_for_hemisphere):\n mock_get_bad_days_for_hemisphere.return_value = []\n\n files = ['nt_20150831_n07_v1.1_s.bin',\n 'nt_20150901_n07_v1.1_s.bin',\n 'nt_20150902_n07_v1.1_s.bin']\n gridset = {'data': np.full((2, 2, 3), 2, dtype=np.int),\n 'metadata': {'files': files,\n 'period_index': pd.period_range(start='2015-08-31',\n end='2015-09-02',\n freq='D')}}\n\n mock_daily_file_path.return_value = files\n _mockgridset_by_filelist.return_value = gridset\n\n interpolated = np.full((2, 2), 2, dtype=np.int)\n mock__interpolate_missing.return_value = interpolated\n\n hemisphere = 
nt.NORTH\n search_paths = ['/anyroot']\n # act\n sid.concentration_daily(hemisphere, 2015, 9, 1, search_paths, interpolation_radius=1)\n\n # assert\n getter._concentration_gridset_by_filelist.assert_called_with(files)\n\n npt.assert_array_equal(mock__interpolate_missing.call_args[0][0], interpolated)\n npt.assert_array_equal(mock__interpolate_missing.call_args[0][1],\n np.full((2, 2, 2), 2, dtype=np.int))\n\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n @patch('seaice.data.gridset_filters._interpolate_missing')\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n def test_no_interpolation_needed_only_includes_file_for_date(self,\n mock_daily_file_path,\n mock__gridset_by_filelist,\n mock__interpolate_missing,\n mock_get_bad_days_for_hemisphere):\n mock_get_bad_days_for_hemisphere.return_value = []\n\n files = ['nt_20112131_n07_v1.1_s.bin',\n 'nt_20120101_n07_v1.1_s.bin',\n 'nt_20120102_n07_v1.1_s.bin']\n gridset = {'data': np.full((2, 2, 3), 4, dtype=np.int),\n 'metadata': {'files': files,\n 'period_index': pd.period_range(start='2011-12-31',\n periods=3,\n freq='D')}}\n\n mock_daily_file_path.return_value = files\n mock__gridset_by_filelist.return_value = gridset\n\n mock__interpolate_missing.return_value = np.full((2, 2), 4, dtype=np.int)\n\n interpolation_radius = 1\n\n nt_hemi = nt.NORTH\n actual_gridset = sid.concentration_daily(nt_hemi,\n 2012,\n 1,\n 1,\n ['/anypaths'],\n interpolation_radius=interpolation_radius)\n actual = actual_gridset['metadata']['files']\n\n expected = ['nt_20120101_n07_v1.1_s.bin']\n\n self.assertEqual(actual, expected)\n\n\nclass Test_concentration_daily_average_over_date_range(unittest.TestCase):\n def test_concentration_daily_average_over_date_range(self):\n date_range = pd.DatetimeIndex(['2001-01-06', '2001-01-07'])\n result = sid.concentration_daily_average_over_date_range('N',\n date_range,\n search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_false(np.all(result['data'] == 255.))\n assert_equals(expected, actual)\n\n def test_different_from_each_day(self):\n date_range = pd.DatetimeIndex(['2001-01-06', '2001-01-07'])\n first = sid.concentration_daily(hemisphere=nt.NORTH, year=2001,\n month=1, day=6, search_paths=TEST_ROOT)\n last = sid.concentration_daily(hemisphere=nt.NORTH, year=2001,\n month=1, day=7, search_paths=TEST_ROOT)\n average = sid.concentration_daily_average_over_date_range('N',\n date_range,\n search_paths=TEST_ROOT)\n\n self.assertFalse(np.all(average['data'] == first['data']))\n self.assertFalse(np.all(average['data'] == last['data']))\n\n\nclass Test_concentration_daily___failed_qa_logic(unittest.TestCase):\n\n def setUp(self):\n self.day_before_grid = np.full(nt.NORTH['shape'], 1, dtype=np.int)\n\n target_grid = np.full(nt.NORTH['shape'], 2, dtype=np.int)\n target_grid[0:3, 0:3] = nt.FLAGS['missing']\n self.target_grid = target_grid.copy()\n\n self.day_after_grid = np.full(nt.NORTH['shape'], 11, dtype=np.int)\n\n self.cube = np.dstack([self.day_before_grid, target_grid, self.day_after_grid])\n\n target_grid[0:3, 0:3] = (1 + 11) / 2\n self.interpolated_grid = target_grid.copy()\n\n self.empty_grid = np.full(nt.NORTH['shape'], nt.FLAGS['missing'], dtype=np.int)\n\n self.target_date = dt.date(1980, 10, 25)\n\n self.period_index = pd.period_range(start='1980-10-24', end='1980-10-26', freq='D')\n\n self.file_list = ['nt_19801024_n07_v1.1_n.bin',\n 'nt_19801025_n07_v1.1_n.bin',\n 
'nt_19801026_n07_v1.1_n.bin']\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_good_day_interpolates_with_good_days_with_allow_bad_dates_false_and_empty_false(\n self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_empty_gridset = False\n allow_bad_dates = False\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = []\n\n file_list = self.file_list\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.cube,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': self.period_index,\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n actual = sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n expected_grid = self.interpolated_grid\n npt.assert_array_equal(actual['data'], expected_grid)\n\n expected_files = self.file_list\n self.assertEqual(actual['metadata']['files'], expected_files)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_good_day_doesnt_interpolate_with_bad_days(\n self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_empty_gridset = False\n allow_bad_dates = False\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = [\n pd.Period(self.target_date - dt.timedelta(1), 'D'),\n pd.Period(self.target_date + dt.timedelta(1), 'D')\n ]\n\n file_list = self.file_list\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.cube,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': self.period_index,\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n actual = sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n expected_grid = self.target_grid\n npt.assert_array_equal(actual['data'], expected_grid)\n\n expected_files = self.file_list[1:2]\n self.assertEqual(actual['metadata']['files'], expected_files)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_raises_when_interpolation_attempt_with_all_bad_days_and_disallowing_bad(\n self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_empty_gridset = False\n allow_bad_dates = False\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = [\n pd.Period(self.target_date - dt.timedelta(1), 'D'),\n pd.Period(self.target_date, 'D'),\n pd.Period(self.target_date + dt.timedelta(1), 'D')\n ]\n\n file_list = self.file_list\n mock_daily_file_path.return_value = file_list\n gridset = {'data': self.cube,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': 
self.period_index,\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n with self.assertRaises(e.SeaIceDataNoData):\n sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_bad_day_interpolates_with_good_days_despite_disallowing_bad(\n self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_empty_gridset = False\n allow_bad_dates = False\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = [pd.Period(self.target_date, 'D')]\n\n file_list = self.file_list\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.cube,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': self.period_index,\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n actual = sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n expected_grid = np.full(nt.NORTH['shape'], 6, dtype=np.int)\n npt.assert_array_equal(actual['data'], expected_grid)\n\n expected_files = [self.file_list[0], self.file_list[2]]\n self.assertEqual(actual['metadata']['files'], expected_files)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_raises_exception_with_no_data_to_interpolate(self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_empty_gridset = False\n allow_bad_dates = True\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = [pd.Period(self.target_date, 'D')]\n\n file_list = []\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.empty_grid,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n with self.assertRaises(e.SeaIceDataNoData):\n sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_raises_exception_with_bad_data(self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_empty_gridset = False\n allow_bad_dates = False\n interpolation_radius = 0\n mock_get_bad_days_for_hemisphere.return_value = [pd.Period(self.target_date, 'D')]\n\n file_list = self.file_list[1:2]\n period_index = self.period_index[1:2]\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.target_grid,\n 'metadata': {'files': file_list,\n 
'missing_value': nt.FLAGS['missing'],\n 'period_index': period_index,\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n with self.assertRaises(e.SeaIceDataNoData):\n sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_raises_exception_with_no_data(self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path):\n allow_empty_gridset = False\n allow_bad_dates = True\n interpolation_radius = 0\n mock_get_bad_days_for_hemisphere.return_value = [pd.Period(self.target_date, 'D')]\n\n file_list = []\n mock_daily_file_path.return_value = file_list\n\n with self.assertRaises(e.SeaIceDataNoData):\n sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_returns_interpolated_bad_data_gridset(self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_bad_dates = True\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = [pd.Period(self.target_date, 'D')]\n\n file_list = self.file_list\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.cube,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': self.period_index,\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n actual = sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_bad_dates=allow_bad_dates)\n expected_grid = self.interpolated_grid\n npt.assert_array_equal(actual['data'], expected_grid)\n\n expected_files = self.file_list\n self.assertEqual(actual['metadata']['files'], expected_files)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_returns_empty_grid_when_all_bad_and_disallowed_bad_but_empty_allowed(\n self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_bad_dates = False\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = [\n pd.Period(self.target_date - dt.timedelta(1), 'D'),\n pd.Period(self.target_date, 'D'),\n pd.Period(self.target_date + dt.timedelta(1), 'D')\n ]\n\n file_list = self.file_list\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.cube,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': self.period_index,\n 'valid_data_range': (0, 100)}}\n\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n actual = sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n 
interpolation_radius=interpolation_radius,\n allow_bad_dates=allow_bad_dates)\n expected_grid = self.empty_grid\n npt.assert_array_equal(actual['data'], expected_grid)\n\n expected_files = []\n self.assertEqual(actual['metadata']['files'], expected_files)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n @patch('seaice.data.locator.daily_file_path')\n def test_with_bad_date_and_empty_gridset_allowed(self,\n mock_daily_file_path,\n mock_get_bad_days_for_hemisphere,\n mock__concentration_gridset_by_filelist):\n allow_bad_dates = False\n files = ['files.1_s.bin']\n mock_daily_file_path.return_value = files\n\n bad_dates = pd.period_range('1980-10-20', '1980-10-27', freq='D')\n mock_get_bad_days_for_hemisphere.return_value = bad_dates\n\n gridset = {'data': self.target_grid,\n 'metadata': {'files': files,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': self.period_index,\n 'valid_data_range': (0, 100)}}\n\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n actual = sid.concentration_daily(nt.NORTH,\n 1980,\n 10,\n 25,\n ['/who/cares'],\n interpolation_radius=0,\n allow_bad_dates=allow_bad_dates)\n expected = np.full((448, 304), 255, dtype=np.int)\n\n npt.assert_array_equal(actual['data'], expected)\n\n\nclass Test_extent_daily(unittest.TestCase):\n\n def test_calls_ok(self):\n result = sid.extent_daily(hemisphere=nt.NORTH, year=2001,\n month=1, day=7, search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_false(np.all(result['data'] == 255.))\n assert_equals(expected, actual)\n\n\nclass Test_extent_daily_median(unittest.TestCase):\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_calls_ok(self, mock_get_bad_days):\n mock_get_bad_days.return_value = []\n result = sid.extent_daily_median(hemisphere=nt.NORTH, start_year=2001, end_year=2002,\n dayofyear=7, search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_equals(expected, actual)\n\n\nclass Test_concentration_monthly(unittest.TestCase):\n\n def test_concentration_monthly_with_insufficent_daily_files(self):\n result = sid.concentration_monthly(hemisphere=nt.NORTH, year=2001,\n month=1, search_paths=TEST_ROOT,\n allow_empty_gridset=True)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_true(np.all(result['data'] == 255.))\n assert_equals(result['metadata']['empty_gridset'], True)\n assert_equals(expected, actual)\n\n def test_concentration_monthly_with_sufficient_daily_files(self):\n result = sid.concentration_monthly(hemisphere=nt.NORTH, year=2001,\n month=1, search_paths=TEST_ROOT,\n allow_empty_gridset=True,\n min_days_for_valid_month=2)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_false(np.all(result['data'] == 255.))\n assert_equals(expected, actual)\n assert_equals(result['metadata'].get('empty_gridset', False), False)\n\n def test_missing_month_returns_empty_grid(self):\n result = sid.concentration_monthly(hemisphere=nt.NORTH, year=2002,\n month=1, search_paths=TEST_ROOT,\n allow_empty_gridset=True)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_true(np.all(result['data'] == 255.))\n assert_equals(expected, actual)\n assert_equals(result['metadata']['empty_gridset'], True)\n\n def 
test_missing_month_raises_when_asked_to(self):\n assert_raises(e.SeaIceDataNoData, sid.concentration_monthly,\n hemisphere=nt.NORTH, year=2002,\n month=1, search_paths=TEST_ROOT,\n allow_empty_gridset=False)\n\n\nclass Test_extent_monthly(unittest.TestCase):\n\n def test_calls_ok(self):\n result = sid.extent_monthly(hemisphere=nt.NORTH, year=2001, month=1, search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_equals(expected, actual)\n\n\nclass Test_extent_monthly_median(unittest.TestCase):\n\n def test_calls_ok(self):\n result = sid.extent_monthly_median(hemisphere=nt.NORTH, start_year=2001, end_year=2002,\n month=1, search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_equals(expected, actual)\n\n\nclass Test__filters(unittest.TestCase):\n def test_no_params(self):\n actual = api._filters()\n expected = []\n self.assertEqual(actual, expected)\n\n @patch('seaice.data.api.functools.partial')\n def test_drop_land(self, mock_partial):\n actual = api._filters(drop_land=True)\n expected = [mock_partial.return_value]\n\n mock_partial.assert_called_once_with(gf.drop_land, nt.FLAGS['land'], nt.FLAGS['coast'])\n self.assertEqual(actual, expected)\n\n def test_bad_dates(self):\n actual = api._filters(allow_bad_dates=False)\n expected = [gf.drop_bad_dates]\n self.assertEqual(actual, expected)\n\n def test_interpolate(self):\n actual = api._filters(interpolation_radius=1)\n expected = [gf.interpolate]\n self.assertEqual(actual, expected)\n\n def test_drop_invalid_ice_with_no_other_params(self):\n actual = api._filters(drop_invalid_ice=True)\n expected = []\n self.assertEqual(actual, expected)\n\n @patch('seaice.data.api.functools.partial')\n @patch('seaice.nasateam.invalid_ice_mask')\n def test_drop_invalid_ice_with_normal_params(self,\n mock_invalid_ice_mask,\n mock_partial):\n hemisphere = nt.NORTH\n\n mask = np.ones((448, 304), dtype=bool)\n mock_invalid_ice_mask.return_value = mask\n\n def wrapped_func():\n pass\n mock_partial.return_value = wrapped_func\n\n actual_filters = api._filters(hemisphere=hemisphere,\n month=1,\n drop_invalid_ice=True)\n\n expected_filters = [wrapped_func]\n\n actual_partial_called_with_func = mock_partial.call_args[0][0]\n actual_partial_called_with_mask = mock_partial.call_args[0][1]\n\n mock_invalid_ice_mask.assert_called_once_with(hemisphere, 1)\n self.assertEqual(actual_partial_called_with_func, gf.drop_invalid_ice)\n npt.assert_array_equal(mask, actual_partial_called_with_mask)\n self.assertEqual(actual_filters, expected_filters)\n\n @patch('seaice.data.api.functools.partial')\n @patch('seaice.data.api._invalid_ice_mask_for_median')\n def test_drop_invalid_ice_with_median_params(self,\n mock_invalid_ice_mask_for_median,\n mock_partial):\n hemisphere = nt.NORTH\n\n mask = np.ones((448, 304), dtype=bool)\n mock_invalid_ice_mask_for_median.return_value = mask\n\n def wrapped_func():\n pass\n mock_partial.return_value = wrapped_func\n\n actual_filters = api._filters(hemisphere=hemisphere,\n start_year=2001,\n end_year=2005,\n dayofyear=60,\n drop_invalid_ice=True)\n\n expected_filters = [wrapped_func]\n\n actual_partial_called_with_func = mock_partial.call_args[0][0]\n actual_partial_called_with_mask = mock_partial.call_args[0][1]\n\n mock_invalid_ice_mask_for_median.assert_called_once_with(2001, 2005, 60, hemisphere)\n self.assertEqual(actual_partial_called_with_func, gf.drop_invalid_ice)\n npt.assert_array_equal(mask, 
actual_partial_called_with_mask)\n self.assertEqual(actual_filters, expected_filters)\n\n def test_prevent_empty(self):\n actual = api._filters(allow_empty_gridset=False)\n expected = [gf.prevent_empty]\n self.assertEqual(actual, expected)\n\n @patch('seaice.nasateam.invalid_ice_mask')\n @patch('seaice.data.api._invalid_ice_mask_for_median')\n def test_order_prevent_empty_is_last(self,\n mock_invalid_ice_mask,\n mock__invalid_ice_mask_for_median):\n mask = np.ones((448, 304), dtype=bool)\n mock_invalid_ice_mask.return_value = mask\n mock__invalid_ice_mask_for_median.return_value = mask\n\n ALLOW_EMPTY_GRIDSET = False\n\n for drop_land in [True, False, None]:\n for allow_bad_dates in [True, False, None]:\n for interpolation_radius in [0, 1]:\n for drop_invalid_ice in [True, False, None]:\n for hemisphere, month, start_year, end_year, dayofyear in [\n (nt.NORTH, 1, None, None, None),\n (nt.NORTH, None, 2000, 2005, 60),\n (None, None, None, None, None)]:\n actual_filters = api._filters(\n hemisphere=hemisphere,\n month=month,\n drop_land=drop_land,\n allow_bad_dates=allow_bad_dates,\n interpolation_radius=interpolation_radius,\n drop_invalid_ice=drop_invalid_ice,\n\n allow_empty_gridset=ALLOW_EMPTY_GRIDSET)\n\n actual_prevent_empty_index = actual_filters.index(gf.prevent_empty)\n expected_prevent_empty_index = len(actual_filters) - 1\n\n self.assertEqual(actual_prevent_empty_index,\n expected_prevent_empty_index)\n\n @patch('seaice.nasateam.invalid_ice_mask')\n @patch('seaice.data.api._invalid_ice_mask_for_median')\n def test_order_drop_bad_dates_before_interpolate(self,\n mock_invalid_ice_mask,\n mock__invalid_ice_mask_for_median):\n mask = np.ones((448, 304), dtype=bool)\n mock_invalid_ice_mask.return_value = mask\n mock__invalid_ice_mask_for_median.return_value = mask\n\n ALLOW_BAD_DATES = False\n INTERPOLATION_RADIUS = 1\n\n for drop_land in [True, False, None]:\n for drop_invalid_ice in [True, False, None]:\n for hemisphere, month, start_year, end_year, dayofyear in [\n (nt.NORTH, 1, None, None, None),\n (nt.NORTH, None, 2000, 2005, 60),\n (None, None, None, None, None)]:\n for allow_empty_gridset in [True, False, None]:\n actual_filters = api._filters(\n hemisphere=hemisphere,\n month=month,\n drop_land=drop_land,\n drop_invalid_ice=drop_invalid_ice,\n allow_empty_gridset=allow_empty_gridset,\n\n allow_bad_dates=ALLOW_BAD_DATES,\n interpolation_radius=INTERPOLATION_RADIUS)\n\n drop_bad_dates_index = actual_filters.index(gf.drop_bad_dates)\n interpolate_index = actual_filters.index(gf.interpolate)\n\n self.assertLess(drop_bad_dates_index, interpolate_index)\n\n\nclass Test__anomaly_gridset(unittest.TestCase):\n pole_hole_value = 251\n\n def _metadata(self):\n flags = {'pole': self.pole_hole_value}\n\n return {\n 'valid_data_range': (0, 100),\n 'files': [],\n 'period_index': pd.PeriodIndex([], freq='M'),\n 'flags': flags\n }\n\n def _climatology_gridset(self, *data):\n return {'data': np.dstack(data), 'metadata': self._metadata()}\n\n def _month_gridset(self, data):\n return {'data': data, 'metadata': self._metadata()}\n\n def test_subtract_average_climatology_from_month(self):\n month_gridset = self._month_gridset(\n np.array([[25, 25],\n [25, 25]]))\n climatology_gridset = self._climatology_gridset(\n np.array([[17, 15],\n [18, 22]]),\n np.array([[19, 25],\n [20, 16]]))\n\n actual = api._anomaly_gridset(month_gridset, climatology_gridset)\n\n expected = np.array([[25 - 18, 25 - 20],\n [25 - 19, 25 - 19]])\n npt.assert_array_equal(actual['data'], expected)\n\n def 
test_preserves_values_outside_valid_range_from_climatology(self):\n month_gridset = self._month_gridset(\n np.array([[25, 10],\n [25, 10]]))\n climatology_gridset = self._climatology_gridset(\n np.array([[17, 101],\n [17, 101]]),\n np.array([[19, 101],\n [19, 101]]))\n\n actual = api._anomaly_gridset(month_gridset, climatology_gridset)\n\n expected = np.array([[7, 101],\n [7, 101]])\n npt.assert_array_equal(actual['data'], expected)\n\n def test_preserves_largest_pole_hole_month_gridset(self):\n \"\"\"Tests that the largest pole hole is retained from the\n month_gridset.\n \"\"\"\n month_gridset = self._month_gridset(\n np.array([[self.pole_hole_value, 25],\n [self.pole_hole_value, self.pole_hole_value]]))\n climatology_gridset = self._climatology_gridset(\n np.array([[17, 15],\n [self.pole_hole_value, self.pole_hole_value]]),\n np.array([[19, 25],\n [self.pole_hole_value, self.pole_hole_value]]))\n\n actual = api._anomaly_gridset(month_gridset, climatology_gridset)\n\n expected = np.array([[self.pole_hole_value, 25 - 20],\n [self.pole_hole_value, self.pole_hole_value]])\n npt.assert_array_equal(actual['data'], expected)\n\n def test_preserves_largest_pole_hole_climatology_gridset(self):\n \"\"\"Tests that the largest pole hole is retained from the\n climatology_gridset.\n \"\"\"\n month_gridset = self._month_gridset(\n np.array([[25, 25],\n [self.pole_hole_value, self.pole_hole_value]]))\n climatology_gridset = self._climatology_gridset(\n np.array([[17, self.pole_hole_value],\n [self.pole_hole_value, self.pole_hole_value]]),\n np.array([[19, self.pole_hole_value],\n [self.pole_hole_value, self.pole_hole_value]]))\n\n actual = api._anomaly_gridset(month_gridset, climatology_gridset)\n\n expected = np.array([[25 - 18, self.pole_hole_value],\n [self.pole_hole_value, self.pole_hole_value]])\n npt.assert_array_equal(actual['data'], expected)\n"
] | [
[
"pandas.period_range",
"numpy.ones",
"pandas.PeriodIndex",
"pandas.DatetimeIndex",
"pandas.Period",
"numpy.testing.assert_array_equal",
"numpy.dstack",
"numpy.all",
"numpy.array",
"numpy.full"
]
] |
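The fixtures in the row above pin down the interpolation contract exercised through `concentration_daily`: a cell flagged missing on the target day is filled with the mean of the same cell on the neighbouring days, e.g. (1 + 11) / 2 = 6 in `setUp`. Below is a small sketch of that behaviour under the fixtures' conventions (255 as the missing sentinel); it is an illustration, not the library's actual `_interpolate_missing` implementation.

```python
import numpy as np

MISSING = 255  # sentinel used by the test fixtures

def interpolate_missing(target, neighbors):
    # Fill MISSING cells with the per-cell mean of the neighbouring
    # days' grids; valid cells pass through unchanged.
    stacked = np.dstack(neighbors).astype(float)
    filled = target.astype(float)          # astype copies, target untouched
    mask = target == MISSING
    filled[mask] = stacked.mean(axis=2)[mask]
    return filled

day_before = np.full((3, 3), 1.0)
day_after = np.full((3, 3), 11.0)
target = np.full((3, 3), 2.0)
target[0, 0] = MISSING
print(interpolate_missing(target, [day_before, day_after])[0, 0])  # 6.0
```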
shenghh2015/segmentation_models | [
"473c528c724f62ff38ac127747dd8babb7de6b85"
] | [
"translate/train_model.py"
] | [
"import os\nimport cv2\nfrom skimage import io\nimport sys\n# import keras\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\nfrom natsort import natsorted\n# sys.path.append('../')\nimport segmentation_models_v1 as sm\nfrom segmentation_models_v1 import Unet, Linknet, PSPNet, FPN, AtUnet, ResUnet\nsm.set_framework('tf.keras')\n\nfrom helper_function import plot_history_flu2, save_phase_fl_history, plot_flu_prediction, plot_set_prediction\nfrom helper_function import save_history_for_callback, plot_history_for_callback\nfrom helper_function import precision, recall, f1_score, calculate_psnr, calculate_pearsonr\nfrom sklearn.metrics import confusion_matrix\n\ndef str2bool(value):\n return value.lower() == 'true'\n\ndef generate_folder(folder_name):\n\tif not os.path.exists(folder_name):\n\t\tos.system('mkdir -p {}'.format(folder_name))\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--gpu\", type=str, default = '2')\nparser.add_argument(\"--docker\", type=str2bool, default = True)\nparser.add_argument(\"--net_type\", type=str, default = 'Unet') #Unet, Linknet, PSPNet, FPN\nparser.add_argument(\"--backbone\", type=str, default = 'efficientnetb0')\nparser.add_argument(\"--dataset\", type=str, default = 'neuron_float')\nparser.add_argument(\"--subset\", type=str, default = 'train')\nparser.add_argument(\"--epoch\", type=int, default = 10)\nparser.add_argument(\"--run\", type=int, default = 1)\nparser.add_argument(\"--dim\", type=int, default = 512)\nparser.add_argument(\"--ch_in\", type=int, default = 3)\nparser.add_argument(\"--ch_out\", type=int, default = 3)\nparser.add_argument(\"--fl_ch\", type=str, default = 'fl12')\nparser.add_argument(\"--rot\", type=float, default = 0)\nparser.add_argument(\"--scale\", type=float, default = 100)\nparser.add_argument(\"--train\", type=int, default = None)\nparser.add_argument(\"--act_fun\", type=str, default = 'relu')\nparser.add_argument(\"--loss\", type=str, default = 'mse')\nparser.add_argument(\"--batch_size\", type=int, default = 6)\nparser.add_argument(\"--lr\", type=float, default = 5e-4)\nparser.add_argument(\"--decay\", type=float, default = 0.8)\nparser.add_argument(\"--delta\", type=float, default = 10)\nparser.add_argument(\"--best_select\", type=str2bool, default = True) ## cancel the selection of best model\nparser.add_argument(\"--pre_train\", type=str2bool, default = True)\nargs = parser.parse_args()\nprint(args)\n\nmodel_name = 'Cor-FL1_FL2-net-{}-bone-{}-pre-{}-epoch-{}-batch-{}-lr-{}-dim-{}-train-{}-rot-{}-set-{}-subset-{}-loss-{}-act-{}-scale-{}-decay-{}-delta-{}-chi-{}-cho-{}-chf-{}-bselect-{}-run-{}'.format(args.net_type, args.backbone, args.pre_train,\\\n\t\t args.epoch, args.batch_size, args.lr, args.dim, args.train, args.rot, args.dataset, args.subset, args.loss, args.act_fun, args.scale, args.decay, args.delta, args.ch_in, args.ch_out, args.fl_ch, args.best_select, args.run)\nprint(model_name)\n\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\nDATA_DIR = '/data/datasets/{}'.format(args.dataset) if args.docker else './datasets/{}'.format(args.dataset)\ntrain_dim = args.dim\n\n# load the sample names\ndef read_samples(file_name):\n\t\twith open(file_name, 'r+') as f:\n\t\t\t\tlines = [fn.strip() for fn in f.readlines()]\n\t\treturn lines\n\ndef read_end_points(file_name):\n\t\tsample_dict = {}\n\t\twith open(file_name, 'r+') as f:\n\t\t\t\tfor line in f.readlines():\n\t\t\t\t\t\tsplits = line.strip().split(' ')\n\t\t\t\t\t\tsample_dict[splits[0]] = [int(splits[1]), 
int(splits[2])]\n\t\treturn sample_dict\n\nsample_dict = None\nif 'neuron' in args.dataset:\n\t\tsample_dict = read_end_points(os.path.join(DATA_DIR, 'range.txt'))\ntrain_fns = read_samples(os.path.join(DATA_DIR, 'train.txt'))\ntest_fns = read_samples(os.path.join(DATA_DIR, 'test.txt'))\ndata_dir = DATA_DIR + '/data'\nval_dim = 1760\n\n# classes for data loading and preprocessing\nclass Dataset:\n \"\"\"CamVid Dataset. Read images, apply augmentation and preprocessing transformations.\n \n Args:\n images_dir (str): path to images folder\n masks_dir (str): path to segmentation masks folder\n class_values (list): values of classes to extract from segmentation mask\n augmentation (albumentations.Compose): data transfromation pipeline \n (e.g. flip, scale, etc.)\n preprocessing (albumentations.Compose): data preprocessing \n (e.g. noralization, shape manipulation, etc.)\n \n \"\"\"\n \n def __init__(\n self, \n data_dir, \n sample_names,\n end_point_dict,\n fl_ch = None,\n scale = 1.0,\n channels = [3,3],\n augmentation=None, \n preprocessing=None,\n ):\n self.images_fps = []\n self.masks1_fps = []\n self.masks2_fps = []\n for sn in sample_names:\n \t\tsample_tag = 'T-' + sn.split('_')[3][5:]\n \t\tif end_point_dict:\n \t\t\t\tend1, end2 = end_point_dict[sample_tag]\n \t\telse:\n \t\t\t\tend1, end2 = 0, np.inf\n \t\tfns = os.listdir(os.path.join(data_dir, sn, 'phase'))\n \t\tfor fn in fns:\n \t\t\t\tif end1 <= int(fn.split('.')[0].split('-')[-1]) <= end2:\n \t\t\t\t\t\tself.images_fps.append(os.path.join(data_dir, sn, 'phase', fn))\n \t\t\t\t\t\tself.masks1_fps.append(os.path.join(data_dir, sn, 'fl1', fn))\n \t\t\t\t\t\tself.masks2_fps.append(os.path.join(data_dir, sn, 'fl2', fn))\n self.ids = self.images_fps\n print('Load files: image {}, fl1: {}, fl2:{}'.format(len(self.images_fps),len(self.masks1_fps),len(self.masks2_fps))) \n self.scale = scale\n self.augmentation = augmentation\n self.preprocessing = preprocessing\n self.channels = channels\n self.fl_ch = fl_ch\n \n def __getitem__(self, i):\n \n # load image and fl1 or fl2 or both\n image = np.load(self.images_fps[i]) * 255.\n if self.fl_ch == 'fl1':\n mask = np.load(self.masks1_fps[i])\n mask = mask * self.scale\n elif self.fl_ch == 'fl2':\n mask = np.load(self.masks2_fps[i])\n mask = mask * self.scale \n elif self.fl_ch == 'fl12':\n mask1 = np.load(self.masks1_fps[i])\n mask2 = np.load(self.masks2_fps[i])\n mask = np.stack([mask1[:,:,1], mask2[:,:,1]], axis = -1)\n mask = mask*self.scale\n \n # decide the input and output channels\n if self.channels[0] == 1:\n image[:,:,0], image[:,:,2] = image[:,:,1], image[:,:,1]\n elif self.channels[0] == 2:\n image[:,:,2] = image[:,:,1]\t\t\n \n if self.channels[1] == 1 and not (self.fl_ch=='fl12'):\n mask = mask[:,:,1:2]\n\n # apply augmentations\n if self.augmentation:\n sample = self.augmentation(image=image, mask=mask)\n image, mask = sample['image'], sample['mask']\n\n # apply preprocessing\n if self.preprocessing:\n sample = self.preprocessing(image=image, mask=mask)\n image, mask = sample['image'], sample['mask']\n\n return image, mask\n \n def __len__(self):\n return len(self.ids)\n \n\nclass Dataloder(tf.keras.utils.Sequence):\n \"\"\"Load data from dataset and form batches\n \n Args:\n dataset: instance of Dataset class for image loading and preprocessing.\n batch_size: Integet number of images in batch.\n shuffle: Boolean, if `True` shuffle image indexes each epoch.\n \"\"\"\n \n def __init__(self, dataset, batch_size=1, shuffle=False):\n self.dataset = dataset\n self.batch_size = 
batch_size\n self.shuffle = shuffle\n self.indexes = np.arange(len(dataset))\n\n self.on_epoch_end()\n\n def __getitem__(self, i):\n \n # collect batch data\n start = i * self.batch_size\n stop = (i + 1) * self.batch_size\n data = []\n for j in range(start, stop):\n data.append(self.dataset[j])\n \n # transpose list of lists\n batch = [np.stack(samples, axis=0) for samples in zip(*data)]\n \n return (batch[0], batch[1])\n \n def __len__(self):\n \"\"\"Denotes the number of batches per epoch\"\"\"\n return len(self.indexes) // self.batch_size\n \n def on_epoch_end(self):\n \"\"\"Callback function to shuffle indexes each epoch\"\"\"\n if self.shuffle:\n self.indexes = np.random.permutation(self.indexes)\n\nimport albumentations as A\n\ndef round_clip_0_1(x, **kwargs):\n return x.round().clip(0, 1)\n\n# define heavy augmentations\ndef get_training_augmentation(dim, rot = 0):\n train_transform = [\n A.HorizontalFlip(p=0.5),\n A.PadIfNeeded(min_height=dim, min_width=dim, always_apply=True, border_mode=0),\n A.RandomCrop(height=dim, width=dim, always_apply=True),]\n return A.Compose(train_transform)\n\n\ndef get_validation_augmentation(dim = 992):\n \"\"\"Add paddings to make image shape divisible by 32\"\"\"\n test_transform = [\n A.PadIfNeeded(dim, dim)\n ]\n return A.Compose(test_transform)\n\ndef get_preprocessing(preprocessing_fn):\n \"\"\"Construct preprocessing transform\n \n Args:\n preprocessing_fn (callbale): data normalization function \n (can be specific for each pretrained neural network)\n Return:\n transform: albumentations.Compose\n \n \"\"\"\n _transform = [\n A.Lambda(image=preprocessing_fn),\n ]\n return A.Compose(_transform)\n\n## create models\nBACKBONE = args.backbone\nBATCH_SIZE = args.batch_size\nLR = args.lr\nEPOCHS = args.epoch\n\n# processing configuration\npreprocess_input = sm.get_preprocessing(BACKBONE)\n\n# define network parameters\nn_classes = args.ch_out if args.fl_ch == 'fl1' or args.fl_ch == 'fl2' else 2\nactivation = '{}'.format(args.act_fun)\n\n#create model\nnet_func = globals()[args.net_type]\n\nencoder_weights='imagenet' if args.pre_train else None\n\nmodel = net_func(BACKBONE, encoder_weights=encoder_weights, classes=n_classes, activation=activation)\n\n# define optomizer\noptim = tf.keras.optimizers.Adam(LR)\n\nif args.loss == 'mse':\n\tloss = tf.keras.losses.MSE\nelif args.loss == 'mae':\n\tloss = tf.keras.losses.MAE\nelif args.loss == 'huber':\n\tloss = tf.keras.losses.Huber(reduction=tf.keras.losses.Reduction.NONE)\n\nfrom tensorflow.keras import backend as K\ndef pearson(y_true, y_pred):\n x = y_true\n y = y_pred\n mx = K.mean(x)\n my = K.mean(y)\n xm, ym = x-mx, y-my\n r_num = K.sum(tf.multiply(xm,ym))\n r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))\n r = r_num / r_den\n return r\n\nmetrics = [sm.metrics.PSNR(max_val=args.scale), pearson]\n\n# compile keras model with defined optimozer, loss and metrics\nmodel.compile(optim, loss, metrics)\n\n# Dataset for train images\ntrain_dataset = Dataset(\n\t\tdata_dir = data_dir,\n\t\tsample_names = train_fns,\n\t\tend_point_dict = sample_dict,\n fl_ch = args.fl_ch,\n channels = [args.ch_in, args.ch_out],\n scale = args.scale,\n augmentation=get_training_augmentation(train_dim, args.rot),\n preprocessing=get_preprocessing(preprocess_input),\n)\n\n# Dataset for validation images\nvalid_dataset = Dataset(\n\t\tdata_dir = data_dir,\n\t\tsample_names = test_fns,\n\t\tend_point_dict = sample_dict,\n fl_ch = args.fl_ch,\n scale = args.scale,\n channels = [args.ch_in, args.ch_out],\n 
augmentation=get_validation_augmentation(val_dim),\n preprocessing=get_preprocessing(preprocess_input),\n)\n\ntrain_dataloader = Dataloder(train_dataset, batch_size=BATCH_SIZE, shuffle=True)\nvalid_dataloader = Dataloder(valid_dataset, batch_size=1, shuffle=False)\n\nprint(train_dataloader[0][0].shape)\nprint(train_dataloader[0][1].shape)\nprint(train_dataloader[0][1].min(), train_dataloader[0][1].max())\n# check shapes for errors\nassert train_dataloader[0][0].shape == (BATCH_SIZE, train_dim, train_dim, 3)\nassert train_dataloader[0][1].shape == (BATCH_SIZE, train_dim, train_dim, n_classes)\n\nmodel_folder = '/data/2d_models/{}/{}'.format(args.dataset, model_name) if args.docker else './2d_models/{}/{}'.format(args.dataset, model_name)\ngenerate_folder(model_folder)\n\ndef concat_tile(im_list_2d):\n return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])\n\ndef save_images(file_name, vols):\n\t\tvols = vols[:,:,:,1] if vols.shape[-1] >= 2 else vols[:,:,:,0]\n\t\tshp = vols.shape\n\t\tls, lx, ly = shp\n\t\tsx, sy = int(lx/128), int(ly/128)\n\t\tvols = vols[:,::sx,::sy]\n\t\tslice_list, rows = [], []\n\t\tfor si in range(vols.shape[0]):\n\t\t\t\tslice = vols[si,:,:]\n\t\t\t\tslice[0, :] = 255\n\t\t\t\tslice[:, 0] = 255\n\t\t\t\tslice[:, -1] = 255\n\t\t\t\tslice[-1, :] = 255\n\t\t\t\trows.append(slice)\n\t\t\t\tif si%8 == 7 and not si == vols.shape[0]-1:\n\t\t\t\t\t\tslice_list.append(rows)\n\t\t\t\t\t\trows = []\n\t\tsave_img = concat_tile(slice_list)\t\t\n\t\tcv2.imwrite(file_name, save_img)\n\nclass HistoryPrintCallback(tf.keras.callbacks.Callback):\n\t\tdef __init__(self):\n\t\t\t\tsuper(HistoryPrintCallback, self).__init__()\n\t\t\t\tself.history = {}\n\n\t\tdef on_epoch_end(self, epoch, logs=None):\n\t\t\t\tif logs:\n\t\t\t\t\t\tfor key in logs.keys():\n\t\t\t\t\t\t\t\tif epoch == 0:\n\t\t\t\t\t\t\t\t\t\tself.history[key] = []\n\t\t\t\t\t\t\t\tself.history[key].append(logs[key])\n\t\t\t\tif epoch%5 == 0:\n\t\t\t\t\t\tplot_history_for_callback(model_folder+'/train_history.png', self.history)\n\t\t\t\t\t\tsave_history_for_callback(model_folder, self.history)\n\t\t\t\t\t\timg_vols, gt_vols, pr_vols = [],[],[]\n\t\t\t\t\t\tfor i in range(0, len(valid_dataset),int(len(valid_dataset)/64)):\n\t\t\t\t\t\t\t\timg_vols.append(np.load(valid_dataloader.dataset.images_fps[i]))\n\t\t\t\t\t\t\t\tgt_vols.append(valid_dataloader[i][1])\n\t\t\t\t\t\t\t\tpr_vols.append(self.model.predict(valid_dataloader[i]))\n\t\t\t\t\t\timg_vols = np.stack(img_vols, axis = 0)\n\t\t\t\t\t\tgt_vols = np.concatenate(gt_vols, axis = 0)\n\t\t\t\t\t\tpr_vols = np.concatenate(pr_vols, axis = 0)\n\t\t\t\t\t\tsave_images(model_folder+'/epoch-{}-img.png'.format(epoch), np.uint8(img_vols))\n\t\t\t\t\t\tsave_images(model_folder+'/epoch-{}-gt.png'.format(epoch), gt_vols/args.scale*255)\n\t\t\t\t\t\tsave_images(model_folder+'/epoch-{}-pr.png'.format(epoch), pr_vols/args.scale*255)\n\n\nif not args.best_select:\n\t\tcallbacks = [\n\t\t\t\ttf.keras.callbacks.ModelCheckpoint(model_folder+'/weights_{epoch:02d}.h5', save_weights_only=True, save_best_only=False, period=5),\n\t\t\t\ttf.keras.callbacks.ReduceLROnPlateau(factor=args.decay),\n\t\t\t\tHistoryPrintCallback(),\n\t\t]\nelse:\n\t\tcallbacks = [\n\t\t\t\ttf.keras.callbacks.ModelCheckpoint(model_folder+'/best_model-{epoch:03d}.h5', monitor='val_pearson', save_weights_only=True, save_best_only=True, mode='max'),\n\t\t\t\ttf.keras.callbacks.ReduceLROnPlateau(factor=args.decay),\n\t\t\t\tHistoryPrintCallback(),\n\t\t]\n\n\n# train model\nhistory = 
model.fit_generator(\n    train_dataloader, \n    steps_per_epoch=len(train_dataloader), \n    epochs=EPOCHS, \n    callbacks=callbacks, \n    validation_data=valid_dataloader, \n    validation_steps=len(valid_dataloader),\n)\n\n# evaluate model on the held-out split, mirroring the valid_dataset construction\ntest_dataset = Dataset(\n    data_dir = data_dir,\n    sample_names = test_fns,\n    end_point_dict = sample_dict,\n    fl_ch = args.fl_ch,\n    channels = [args.ch_in, args.ch_out],\n    scale = args.scale,\n    augmentation=get_validation_augmentation(val_dim),\n    preprocessing=get_preprocessing(preprocess_input),\n)"
] | [
[
"numpy.load",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.losses.Huber",
"tensorflow.keras.backend.mean",
"numpy.random.permutation",
"tensorflow.multiply",
"tensorflow.keras.backend.square",
"numpy.stack",
"numpy.concatenate",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"numpy.uint8"
]
] |
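The custom `pearson` metric defined in the row above is the standard sample correlation coefficient expressed with Keras backend ops. The same computation in plain NumPy makes a convenient sanity check against `np.corrcoef`:

```python
import numpy as np

def pearson_np(x, y):
    # Centered cross-moment divided by the product of the centered norms.
    xm, ym = x - x.mean(), y - y.mean()
    return (xm * ym).sum() / np.sqrt((xm ** 2).sum() * (ym ** 2).sum())

x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([1.1, 1.9, 3.2, 3.8])
print(pearson_np(x, y))         # ~0.991
print(np.corrcoef(x, y)[0, 1])  # agrees with the hand-rolled version
```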
tetradsensors/tetrad-viz-toolkit | [
"908bed14e676143c4a0b1812d90aed6501479653"
] | [
"viztools/tools/snapshot.py"
] | [
"import numpy as np\nfrom viztools.tools import generate_image\n\nclass Snapshot:\n \"\"\"\n Formalized view of the data saved in our database. This just cleans it up\n and makes sure everything is correct before it can be used elsewhere. \n \"\"\"\n def __init__(self,\n lats,\n lons,\n alts,\n vals,\n vars,\n generate_img=False,\n opac95=3,\n opac05=12,\n colormap='auto',\n timestamp=None,\n param=\"PM2.5\"): # Placeholder, will change once estimate maps are for more metrics\n\n self.lats = np.array(lats).reshape(-1,)\n self.lons = np.array(lons).reshape(-1,)\n self.alts = np.array(alts)\n self.vals = np.array(vals)\n self.vars = np.array(vars)\n self.param = param\n self.timestamp = timestamp\n\n assert self.lats.shape[0] == self.vals.shape[1]\n assert self.lons.shape[0] == self.vals.shape[0]\n assert self.alts.shape == self.vals.shape == self.vars.shape\n\n if generate_img:\n # PIL.Image\n self.img = generate_image._snapshot_to_img_dist_scaled(self,\n largest_size=1300, \n scaling='epa', \n opac95=opac95, \n opac05=opac05,\n colormap=colormap)\n else:\n self.img = None\n\n\n\n"
] | [
[
"numpy.array"
]
] |
mederrata/probability | [
"89d248c420b8ecabfd9d6de4a1aa8d3886920049"
] | [
"tensorflow_probability/python/vi/optimization_test.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for variational optimization.\"\"\"\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import test_util\n\n\ntfb = tfp.bijectors\ntfd = tfp.distributions\n\n\nJAX_MODE = False\n\n\n@test_util.test_all_tf_execution_regimes\nclass OptimizationTests(test_util.TestCase):\n\n @test_util.jax_disable_variable_test\n def test_variational_em(self):\n\n seed = test_util.test_seed()\n\n num_samples = 10000\n mu, sigma = 3., 5.\n x = test_util.test_np_rng().randn(num_samples) * sigma + mu\n\n # Test that the tape automatically picks up any trainable variables in\n # the model, even though it's just a function with no explicit\n # `.trainable_variables`\n likelihood_scale = tfp.util.TransformedVariable(\n 1., tfb.Softplus(), name='scale')\n def trainable_log_prob(z):\n lp = tfd.Normal(0., 1.).log_prob(z)\n lp += tf.reduce_sum(tfd.Normal(\n z[..., tf.newaxis], likelihood_scale).log_prob(x), axis=-1)\n return lp\n\n # For this simple normal-normal model, the true posterior is also normal.\n z_posterior_precision = (1./sigma**2 * num_samples + 1.**2)\n z_posterior_stddev = np.sqrt(1./z_posterior_precision)\n z_posterior_mean = (1./sigma**2 * num_samples * mu) / z_posterior_precision\n\n q_loc = tf.Variable(0., name='mu')\n q_scale = tfp.util.TransformedVariable(1., tfb.Softplus(), name='q_scale')\n q = tfd.Normal(q_loc, q_scale)\n loss_curve = tfp.vi.fit_surrogate_posterior(\n trainable_log_prob, q,\n num_steps=1000,\n sample_size=10,\n optimizer=tf.optimizers.Adam(0.1),\n seed=seed)\n self.evaluate(tf1.global_variables_initializer())\n with tf.control_dependencies([loss_curve]):\n final_q_loc = tf.identity(q.mean())\n final_q_scale = tf.identity(q.stddev())\n final_likelihood_scale = tf.convert_to_tensor(likelihood_scale)\n\n # We expect to recover the true posterior because the variational family\n # includes the true posterior, and the true parameters because we observed\n # a large number of sampled points.\n final_likelihood_scale_, final_q_loc_, final_q_scale_ = self.evaluate((\n final_likelihood_scale, final_q_loc, final_q_scale))\n self.assertAllClose(final_likelihood_scale_, sigma, atol=0.2)\n self.assertAllClose(final_q_loc_, z_posterior_mean, atol=0.2)\n self.assertAllClose(final_q_scale_, z_posterior_stddev, atol=0.1)\n\n @test_util.jax_disable_variable_test\n def test_importance_sampling_example(self):\n init_seed, opt_seed, eval_seed = tfp.random.split_seed(\n test_util.test_seed(sampler_type='stateless'), n=3)\n\n def log_prob(z, x):\n return tfd.Normal(0., 1.).log_prob(z) + tfd.Normal(z, 1.).log_prob(x)\n conditioned_log_prob = lambda z: log_prob(z, x=5.)\n\n q_z = tfp.experimental.util.make_trainable(tfd.Normal, seed=init_seed)\n # Fit `q` with an 
importance-weighted variational loss.\n loss_curve = tfp.vi.fit_surrogate_posterior(\n conditioned_log_prob,\n surrogate_posterior=q_z,\n importance_sample_size=10,\n optimizer=tf.optimizers.Adam(learning_rate=0.1),\n num_steps=100,\n seed=opt_seed)\n self.evaluate(tf1.global_variables_initializer())\n loss_curve = self.evaluate(loss_curve)\n\n # Estimate posterior statistics with importance sampling.\n zs, q_log_prob = self.evaluate(q_z.experimental_sample_and_log_prob(\n 1000, seed=eval_seed))\n self_normalized_log_weights = tf.nn.log_softmax(\n conditioned_log_prob(zs) - q_log_prob)\n posterior_mean = tf.reduce_sum(\n tf.exp(self_normalized_log_weights) * zs,\n axis=0)\n self.assertAllClose(posterior_mean, 2.5, atol=1e-1)\n\n posterior_variance = tf.reduce_sum(\n tf.exp(self_normalized_log_weights) * (zs - posterior_mean)**2,\n axis=0)\n self.assertAllClose(posterior_variance, 0.5, atol=1e-1)\n\n # Test reproducibility\n q_z_again = tfp.experimental.util.make_trainable(tfd.Normal, seed=init_seed)\n # Fit `q` with an importance-weighted variational loss.\n loss_curve_again = tfp.vi.fit_surrogate_posterior(\n conditioned_log_prob,\n surrogate_posterior=q_z_again,\n importance_sample_size=10,\n optimizer=tf.optimizers.Adam(learning_rate=0.1),\n num_steps=100,\n seed=opt_seed)\n self.evaluate(tf1.global_variables_initializer())\n loss_curve_again = self.evaluate(loss_curve_again)\n self.assertAllClose(loss_curve_again, loss_curve)\n\n @test_util.jax_disable_variable_test\n def test_fit_posterior_with_joint_q(self):\n\n # Target distribution: equiv to MVNFullCovariance(cov=[[1., 1.], [1., 2.]])\n def p_log_prob(z, x):\n return tfd.Normal(0., 1.).log_prob(z) + tfd.Normal(z, 1.).log_prob(x)\n\n # The Q family is a joint distribution that can express any 2D MVN.\n b = tf.Variable([0., 0.])\n l = tfp.util.TransformedVariable(tf.eye(2), tfb.FillScaleTriL())\n def trainable_q_fn():\n z = yield tfd.JointDistributionCoroutine.Root(\n tfd.Normal(b[0], l[0, 0], name='z'))\n _ = yield tfd.Normal(b[1] + l[1, 0] * z, l[1, 1], name='x')\n q = tfd.JointDistributionCoroutine(trainable_q_fn)\n\n seed = test_util.test_seed()\n loss_curve = tfp.vi.fit_surrogate_posterior(\n p_log_prob, q, num_steps=1000, sample_size=100,\n optimizer=tf.optimizers.Adam(learning_rate=0.1),\n seed=seed)\n self.evaluate(tf1.global_variables_initializer())\n loss_curve_ = self.evaluate((loss_curve))\n\n # Since the Q family includes the true distribution, the optimized\n # loss should be (approximately) zero.\n self.assertAllClose(loss_curve_[-1], 0., atol=0.1)\n\n @test_util.jax_disable_variable_test\n def test_inhomogeneous_poisson_process_example(self):\n # Toy 1D data.\n index_points = np.array([-10., -7.2, -4., -0.1, 0.1, 4., 6.2, 9.]).reshape(\n [-1, 1]).astype(np.float32)\n observed_counts = np.array(\n [100, 90, 60, 13, 18, 37, 55, 42]).astype(np.float32)\n\n # Trainable GP hyperparameters.\n kernel_log_amplitude = tf.Variable(0., name='kernel_log_amplitude')\n kernel_log_lengthscale = tf.Variable(0., name='kernel_log_lengthscale')\n observation_noise_log_scale = tf.Variable(\n 0., name='observation_noise_log_scale')\n\n # Generative model.\n def model_fn():\n kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(\n amplitude=tf.exp(kernel_log_amplitude),\n length_scale=tf.exp(kernel_log_lengthscale))\n latent_log_rates = yield tfd.JointDistributionCoroutine.Root(\n tfd.GaussianProcess(\n kernel,\n index_points=index_points,\n observation_noise_variance=tf.exp(observation_noise_log_scale),\n name='latent_log_rates'))\n yield 
tfd.Independent(\n tfd.Poisson(log_rate=latent_log_rates),\n reinterpreted_batch_ndims=1, name='y')\n model = tfd.JointDistributionCoroutine(model_fn, name='model')\n\n # Variational model.\n logit_locs = tf.Variable(tf.zeros(observed_counts.shape))\n logit_softplus_scales = tf.Variable(tf.ones(observed_counts.shape) * -1)\n def variational_model_fn():\n _ = yield tfd.JointDistributionCoroutine.Root(tfd.Independent(\n tfd.Normal(loc=logit_locs,\n scale=tf.nn.softplus(logit_softplus_scales)),\n reinterpreted_batch_ndims=1))\n _ = yield tfd.VectorDeterministic(observed_counts)\n q = tfd.JointDistributionCoroutine(variational_model_fn,\n name='variational_model')\n\n losses, sample_path = tfp.vi.fit_surrogate_posterior(\n target_log_prob_fn=lambda *args: model.log_prob(args),\n surrogate_posterior=q,\n optimizer=tf.optimizers.Adam(learning_rate=0.1),\n num_steps=100,\n seed=test_util.test_seed(),\n sample_size=1,\n trace_fn=lambda t: (t.loss, q.sample(seed=42)[0]))\n\n self.evaluate(tf1.global_variables_initializer())\n losses_, sample_path_ = self.evaluate((losses, sample_path))\n self.assertLess(losses_[-1], 80.) # Optimal loss is roughly 40.\n # Optimal latent logits are approximately the log observed counts.\n self.assertAllClose(sample_path_[-1], np.log(observed_counts), atol=1.0)\n\n\n@test_util.test_all_tf_execution_regimes\nclass StatelessOptimizationTests(test_util.TestCase):\n\n def test_importance_sampling_example(self):\n if not JAX_MODE:\n self.skipTest('Requires optax.')\n import optax # pylint: disable=g-import-not-at-top\n\n init_seed, opt_seed, eval_seed = tfp.random.split_seed(\n test_util.test_seed(sampler_type='stateless'), n=3)\n\n def log_prob(z, x):\n return tfd.Normal(0., 1.).log_prob(z) + tfd.Normal(z, 1.).log_prob(x)\n conditioned_log_prob = lambda z: log_prob(z, x=5.)\n\n init_normal, build_normal = tfp.experimental.util.make_trainable_stateless(\n tfd.Normal)\n # Fit `q` with an importance-weighted variational loss.\n optimized_parameters, _ = tfp.vi.fit_surrogate_posterior_stateless(\n conditioned_log_prob,\n build_surrogate_posterior_fn=build_normal,\n initial_parameters=init_normal(seed=init_seed),\n importance_sample_size=10,\n optimizer=optax.adam(0.1),\n num_steps=200,\n seed=opt_seed)\n q_z = build_normal(*optimized_parameters)\n\n # Estimate posterior statistics with importance sampling.\n zs, q_log_prob = self.evaluate(q_z.experimental_sample_and_log_prob(\n 1000, seed=eval_seed))\n self_normalized_log_weights = tf.nn.log_softmax(\n conditioned_log_prob(zs) - q_log_prob)\n posterior_mean = tf.reduce_sum(\n tf.exp(self_normalized_log_weights) * zs,\n axis=0)\n self.assertAllClose(posterior_mean, 2.5, atol=1e-1)\n\n posterior_variance = tf.reduce_sum(\n tf.exp(self_normalized_log_weights) * (zs - posterior_mean)**2,\n axis=0)\n self.assertAllClose(posterior_variance, 0.5, atol=1e-1)\n\n def test_inhomogeneous_poisson_process_example(self):\n opt_seed, eval_seed = tfp.random.split_seed(\n test_util.test_seed(sampler_type='stateless'), n=2)\n\n # Toy 1D data.\n index_points = np.array([-10., -7.2, -4., -0.1, 0.1, 4., 6.2, 9.]).reshape(\n [-1, 1]).astype(np.float32)\n observed_counts = np.array(\n [100, 90, 60, 13, 18, 37, 55, 42]).astype(np.float32)\n\n # Generative model.\n def model_fn():\n kernel_amplitude = yield tfd.LogNormal(\n loc=0., scale=1., name='kernel_amplitude')\n kernel_lengthscale = yield tfd.LogNormal(\n loc=0., scale=1., name='kernel_lengthscale')\n observation_noise_scale = yield tfd.LogNormal(\n loc=0., scale=1., 
name='observation_noise_scale')\n kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(\n amplitude=kernel_amplitude,\n length_scale=kernel_lengthscale)\n latent_log_rates = yield tfd.GaussianProcess(\n kernel,\n index_points=index_points,\n observation_noise_variance=observation_noise_scale,\n name='latent_log_rates')\n yield tfd.Independent(tfd.Poisson(log_rate=latent_log_rates),\n reinterpreted_batch_ndims=1,\n name='y')\n model = tfd.JointDistributionCoroutineAutoBatched(model_fn)\n pinned = model.experimental_pin(y=observed_counts)\n\n initial_parameters = (0., 0., 0., # Raw kernel parameters.\n tf.zeros_like(observed_counts), # `logit_locs`\n tf.zeros_like(observed_counts)) # `logit_raw_scales`\n\n def build_surrogate_posterior_fn(\n raw_kernel_amplitude, raw_kernel_lengthscale,\n raw_observation_noise_scale,\n logit_locs, logit_raw_scales):\n\n def variational_model_fn():\n # Fit the kernel parameters as point masses.\n yield tfd.Deterministic(\n tf.nn.softplus(raw_kernel_amplitude), name='kernel_amplitude')\n yield tfd.Deterministic(\n tf.nn.softplus(raw_kernel_lengthscale), name='kernel_lengthscale')\n yield tfd.Deterministic(\n tf.nn.softplus(raw_observation_noise_scale),\n name='kernel_observation_noise_scale')\n # Factored normal posterior over the GP logits.\n yield tfd.Independent(\n tfd.Normal(loc=logit_locs,\n scale=tf.nn.softplus(logit_raw_scales)),\n reinterpreted_batch_ndims=1,\n name='latent_log_rates')\n return tfd.JointDistributionCoroutineAutoBatched(variational_model_fn)\n\n if not JAX_MODE:\n return\n import optax # pylint: disable=g-import-not-at-top\n\n [\n optimized_parameters,\n (losses, _, sample_path)\n ] = tfp.vi.fit_surrogate_posterior_stateless(\n target_log_prob_fn=pinned.unnormalized_log_prob,\n build_surrogate_posterior_fn=build_surrogate_posterior_fn,\n initial_parameters=initial_parameters,\n optimizer=optax.adam(learning_rate=0.1),\n sample_size=1,\n num_steps=500,\n trace_fn=lambda traceable_quantities: ( # pylint: disable=g-long-lambda\n traceable_quantities.loss,\n tf.nn.softplus(traceable_quantities.parameters[0]),\n build_surrogate_posterior_fn(\n *traceable_quantities.parameters).sample(seed=eval_seed)[-1]),\n seed=opt_seed)\n surrogate_posterior = build_surrogate_posterior_fn(*optimized_parameters)\n surrogate_posterior.sample(seed=eval_seed)\n\n losses_, sample_path_ = self.evaluate((losses, sample_path))\n self.assertLess(losses_[-1], 80.) # Optimal loss is roughly 40.\n # Optimal latent logits are approximately the log observed counts.\n self.assertAllClose(sample_path_[-1], np.log(observed_counts), atol=1.0)\n\n\nif __name__ == '__main__':\n test_util.main()\n"
] | [
[
"tensorflow.compat.v2.optimizers.Adam",
"tensorflow.compat.v2.exp",
"tensorflow.compat.v2.nn.softplus",
"tensorflow.compat.v2.control_dependencies",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.eye",
"tensorflow.compat.v2.zeros",
"numpy.log",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v2.ones",
"numpy.sqrt",
"numpy.array",
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.zeros_like"
]
] |
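The tests above estimate posterior moments by self-normalized importance sampling: draws from the surrogate q are reweighted by log p(z, x) - log q(z) through a log-softmax. Below is a minimal NumPy/SciPy sketch of that estimator, independent of TFP; the proposal parameters are illustrative assumptions, not values from a fitted surrogate. For the Normal(0, 1) prior and Normal(z, 1) likelihood with x = 5, the exact posterior is Normal(2.5, 0.5), which is what the assertions check against (atol=1e-1).

import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(0)
q_mean, q_scale = 2.0, 1.0                      # assumed proposal q(z), for illustration only
zs = rng.normal(q_mean, q_scale, size=10_000)

# Unnormalized target: log p(z) + log p(x=5 | z) with a Normal(0, 1) prior and Normal(z, 1) likelihood.
log_p = norm.logpdf(zs, 0.0, 1.0) + norm.logpdf(5.0, zs, 1.0)
log_w = log_p - norm.logpdf(zs, q_mean, q_scale)
w = np.exp(log_w - log_w.max())                 # stabilize before exponentiating
w /= w.sum()                                    # self-normalized weights (the log_softmax above)

posterior_mean = np.sum(w * zs)                 # exact answer: 2.5
posterior_var = np.sum(w * (zs - posterior_mean) ** 2)  # exact answer: 0.5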
phgupta/XBOS | [
"1fea0b024d97ae142d97b3a94510403928ed44b7"
] | [
"services/occupancy/server.py"
] | [
"from concurrent import futures\nimport time\nimport grpc\nimport logging\nlogging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d:%H:%M:%S', level=logging.DEBUG)\nimport occupancy_pb2\nimport occupancy_pb2_grpc\n\n_ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\nimport os\nimport xbos_services_utils3 as utils\nimport datetime\nimport pytz\nimport numpy as np\nimport pandas as pd\nimport yaml\n\nDAYS_IN_WEEK = 7\nOCCUPANCY_DATA_PATH = os.environ[\"OCCUPANCY_DATA_PATH\"]\nOCCUPANCY_HOST_ADDRESS = os.environ[\"OCCUPANCY_HOST_ADDRESS\"]\n\ndef _get_occupancy_config(building, zone):\n occ_path = OCCUPANCY_DATA_PATH + \"/\" + building + \"/\" + zone + \".yml\"\n\n if os.path.exists(occ_path):\n with open(occ_path, \"r\") as f:\n try:\n config = yaml.load(f)\n except yaml.YAMLError:\n return None, \"yaml could not read file at: %s\" % occ_path\n else:\n return None, \"occupancy file could not be found. path: %s.\" % occ_path\n\n return config, None\n\n\ndef _get_week_occupancy(building, zone, date, interval):\n \"\"\"\n Gets the occupancy from the zone configuration file. Correctly Resamples the data according to interval\n :param date: The date for which we want to start the week. Timezone aware.\n :param interval: int:seconds. The interval/frequency of resampling.\n :return: pd.Series with time_series index in timezone of building.\n \"\"\"\n config, err = _get_occupancy_config(building, zone)\n if config is None:\n return None, err\n\n # Set the date to the controller timezone.\n building_date = date.astimezone(tz=pytz.timezone(config[\"tz\"]))\n weekday = building_date.weekday()\n\n list_occ_data = []\n\n occ_data = config[\"occupancy\"]\n\n # Note, we need to get a day before the start and after the end of the week to correctly resample due to timezones.\n for i in range(DAYS_IN_WEEK + 2):\n curr_weekday = (weekday + i - 1) % DAYS_IN_WEEK\n curr_day = building_date + datetime.timedelta(days=i - 1)\n\n curr_idx = []\n curr_occ = []\n\n date_occupancy = np.array(occ_data[curr_weekday])\n\n for interval_occupancy in date_occupancy:\n start, end, occ = interval_occupancy\n start = utils.combine_date_time(start, curr_day)\n\n occ = float(occ)\n curr_idx.append(start)\n curr_occ.append(occ)\n\n list_occ_data.append(pd.Series(index=curr_idx, data=curr_occ))\n\n series_occ = pd.concat(list_occ_data)\n\n series_occ = series_occ.tz_convert(date.tzinfo)\n\n # decrements in interval-steps till beginning of day of date.\n decremented_date = utils.decrement_to_start_of_day(date, interval)\n\n series_occ = utils.smart_resample(series_occ, decremented_date, decremented_date + datetime.timedelta(days=7),\n interval, \"pad\")\n\n return series_occ, None\n\n\ndef get_all_occ(building, zone, start, end, interval):\n \"\"\"\n Gets the occupancy of a zone from start to end in the given interval.\n :param building: string\n :param zone: string\n :param start: datetime. timezone aware\n :param end: datetime. timezone aware.\n :param interval: int:seconds. seconds_in_day % interval == 0\n :return:\n\n NOTE: If (end-start).total_seconds % interval != 0, then make new_end such that new_end < end and\n the condition is satisfied. 
New_end will also not be inclusive.\n    \"\"\"\n\n    first_seven_days, err = _get_week_occupancy(building, zone, start, interval)\n    if first_seven_days is None:\n        return None, err\n\n    first_seven_days_start = first_seven_days.index[0]\n    first_seven_days_end = first_seven_days_start + datetime.timedelta(days=DAYS_IN_WEEK)\n\n    if end < first_seven_days_end:\n        return first_seven_days[start:end][:-1], None\n\n    # get occupancy for the remaining days.\n    remaining_data = []\n\n    for i in range((end - first_seven_days_end).days + 1):\n        curr_offset = i % DAYS_IN_WEEK\n\n        curr_time = first_seven_days_end + datetime.timedelta(days=i)\n\n        curr_data = first_seven_days[first_seven_days_start + datetime.timedelta(days=curr_offset):\n                                     first_seven_days_start + datetime.timedelta(days=curr_offset + 1)][\n                    :int(24 * 60 * 60 / interval)]\n\n        curr_start_date = curr_time\n        curr_end_date = curr_start_date + datetime.timedelta(days=1)\n        date_range = pd.date_range(start=curr_start_date, end=curr_end_date, freq=str(interval) + \"S\")[:-1]\n        curr_data.index = date_range\n\n        remaining_data.append(curr_data)\n\n    occupancy_series = pd.concat([first_seven_days] + remaining_data)\n\n    return occupancy_series[start:end][:-1], None\n\n\ndef get_occupancy(request):\n    \"\"\"Returns preprocessed occupancy data for a given request or None.\"\"\"\n    logging.info(\"received request: %s %s %s %s %s\", request.building, request.zone, request.start, request.end, request.window)\n    window_seconds = utils.get_window_in_sec(request.window)\n\n    request_length = [len(request.building), len(request.zone), request.start, request.end,\n                      window_seconds]\n\n    if any(v == 0 for v in request_length):\n        return None, \"invalid request, empty params\"\n    if request.start >= request.end:\n        return None, \"invalid request, start date is after end date.\"\n    if request.start < 0 or request.end < 0:\n        return None, \"invalid request, negative dates\"\n    if request.start + (window_seconds * 1e9) > request.end:\n        return None, \"invalid request, start date + window is greater than end date\"\n    if 60 * 60 * 24 % window_seconds != 0:\n        return None, \"window does not evenly divide a day (seconds_in_day % window != 0).\"\n\n    start_datetime = datetime.datetime.utcfromtimestamp(\n        float(request.start / 1e9)).replace(tzinfo=pytz.utc)\n    end_datetime = datetime.datetime.utcfromtimestamp(\n        float(request.end / 1e9)).replace(tzinfo=pytz.utc)\n\n    all_occupancy, err = get_all_occ(request.building, request.zone, start_datetime, end_datetime, window_seconds)\n    if all_occupancy is None:\n        return [occupancy_pb2.OccupancyPoint()], err\n\n    grpc_occ = []\n    for idx, row in all_occupancy.items():\n        grpc_occ.append(\n            occupancy_pb2.OccupancyPoint(time=int(idx.timestamp() * 1e9), occupancy=row))\n    return grpc_occ, None\n    # return occupancy_pb2.OccupancyReply(occupancies=grpc_occ), None\n\n\nclass OccupancyServicer(occupancy_pb2_grpc.OccupancyServicer):\n    def __init__(self):\n        pass\n\n    def GetOccupancy(self, request, context):\n        \"\"\"A simple RPC.\n\n        Sends the occupancy for a given building and zone, within a duration (start, end), and a requested window.\n        An error is returned if there is no occupancy data for the given request.\n        \"\"\"\n        occupancy, error = get_occupancy(request)\n        if occupancy is None:\n            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n            context.set_details(error)\n            return occupancy_pb2.OccupancyPoint()\n        elif error is not None:\n            context.set_code(grpc.StatusCode.UNAVAILABLE)\n            context.set_details(error)\n\n        for occ in occupancy:\n            yield occ\n\n\ndef serve():\n    server = 
grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n occupancy_pb2_grpc.add_OccupancyServicer_to_server(OccupancyServicer(), server)\n server.add_insecure_port(OCCUPANCY_HOST_ADDRESS)\n logging.info(\"Serving on {0} with data path {1}\".format(OCCUPANCY_HOST_ADDRESS, OCCUPANCY_DATA_PATH))\n server.start()\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n\n\nif __name__ == '__main__':\n serve()\n"
] | [
[
"numpy.array",
"pandas.Series",
"pandas.concat"
]
] |
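The core of get_all_occ above is tiling one week of occupancy values to cover an arbitrary range by reusing the matching weekday from the first week. Below is a stripped-down pandas sketch of that tiling; the hourly schedule and dates are made up and do not reflect the service's YAML config format.

import numpy as np
import pandas as pd

interval = 3600                                   # seconds; one value per hour
week_start = pd.Timestamp("2021-01-04", tz="UTC")  # a Monday
week_index = pd.date_range(week_start, periods=7 * 24, freq="3600S")
daily = [0.0] * 8 + [1.0] * 10 + [0.0] * 6        # "occupied" from 08:00 to 18:00
week = pd.Series(np.tile(daily, 7), index=week_index)

# Cover extra days by copying the matching weekday out of the first week.
extra = []
for i in range(3):
    offset = i % 7
    day = week[week_start + pd.Timedelta(days=offset):][: 24 * 3600 // interval].copy()
    day.index = day.index + pd.Timedelta(days=7 + i - offset)  # shift onto the new date
    extra.append(day)

occupancy = pd.concat([week] + extra)             # ten days of values at `interval` spacing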
xionghuichen/RLAssistant | [
"efbde6609cfbd60646f935b450dac65bcaa340e6"
] | [
"RLA/easy_log/tester.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\n\n# Author : Xionghui Chen\n# Created : 2017-11-12\n# Modified : 2017-11-12\n# Version : 1.0\nfrom collections import deque\nimport dill\nimport time\nimport os\n\nimport datetime\nimport os.path as osp\nfrom RLA.easy_log.const import *\nfrom RLA.easy_log.time_step import time_step_holder\nfrom RLA.easy_log import logger\nfrom RLA.easy_log.const import *\nimport yaml\nimport shutil\nimport argparse\n\n\nclass ExperimentLoader(object):\n def __init__(self):\n self.task_name = None\n self.record_date = None\n self.root = None\n self.inherit_hp = None\n pass\n\n def config(self, task_name, record_date, root, inherit_hp):\n self.task_name = task_name\n self.record_date = record_date\n self.root = root\n self.inherit_hp = inherit_hp\n\n @property\n def is_valid_config(self):\n if self.record_date is not None and self.task_name is not None and self.root is not None:\n return True\n else:\n logger.warn(\"meet invalid loader config when use it\")\n logger.warn(\"record_date\", self.record_date)\n logger.warn(\"task_name\", self.task_name)\n logger.warn(\"root\", self.root)\n return False\n\n def import_hyper_parameters(self):\n if self.is_valid_config:\n load_tester = Tester.load_tester(self.record_date, self.task_name, self.root)\n args = argparse.Namespace(**load_tester.hyper_param)\n return args\n else:\n return None\n\n def load_from_record_date(self, var_prefix=''):\n if self.is_valid_config:\n loaded_tester = Tester.load_tester(self.record_date, self.task_name, self.root)\n # load checkpoint\n loaded_tester.new_saver(var_prefix=var_prefix, max_to_keep=1)\n load_iter, load_res = loaded_tester.load_checkpoint()\n tester.time_step_holder.set_time(load_iter)\n tester.print_log_dir()\n if self.inherit_hp:\n return load_iter, load_res\n else:\n return 0, load_res\n else:\n return 0, {}\n\n def fork_tester_log_files(self):\n \"\"\"\n copy the log files in task_name/record_date to the new experiment.\n :param task_name:\n :param record_date:\n :return:\n \"\"\"\n if self.is_valid_config:\n global tester\n assert isinstance(tester, Tester)\n loaded_tester = Tester.load_tester(self.record_date, self.task_name, self.root)\n # copy log file\n tester.log_file_copy(loaded_tester)\n # copy attribute\n tester.hyper_param = loaded_tester.hyper_param\n tester.hyper_param_record = loaded_tester.hyper_param_record\n tester.private_config = loaded_tester.private_config\n\nexperimental_loader = ExperimentLoader()\n\ndef import_hyper_parameters(task_name, record_date):\n \"\"\"\n return the hyper parameters of the experiment in task_name/record_date, which is stored in Tester.\n\n :param task_name:\n :param record_date:\n :return:\n \"\"\"\n logger.warn(\"the function is deprecated. please check the ExperimentLoader as the new implementation\")\n global tester\n assert isinstance(tester, Tester)\n load_tester = tester.load_tester(record_date, task_name, tester.root)\n\n args = argparse.Namespace(**load_tester.hyper_param)\n return args\n\n\ndef load_from_record_date(task_name, record_date):\n \"\"\"\n load the checkpoint of the experiment in task_name/record_date.\n :param task_name:\n :param record_date:\n :return:\n \"\"\"\n logger.warn(\"the function is deprecated. 
please check the ExperimentLoader as the new implementation\")\n global tester\n assert isinstance(tester, Tester)\n load_tester = tester.load_tester(record_date, task_name, tester.root)\n # load checkpoint\n load_tester.new_saver(var_prefix='', max_to_keep=1)\n load_iter, load_res = load_tester.load_checkpoint()\n tester.time_step_holder.set_time(load_iter)\n tester.print_log_dir()\n return load_iter, load_res\n\n\ndef fork_tester_log_files(task_name, record_date):\n \"\"\"\n copy the log files in task_name/record_date to the new experiment.\n :param task_name:\n :param record_date:\n :return:\n \"\"\"\n logger.warn(\"the function is deprecated. please check the ExperimentLoader as the new implementation\")\n global tester\n assert isinstance(tester, Tester)\n load_tester = tester.load_tester(record_date, task_name, tester.root)\n # copy log file\n tester.log_file_copy(load_tester)\n # copy attribute\n tester.hyper_param = load_tester.hyper_param\n tester.hyper_param_record = load_tester.hyper_param_record\n tester.private_config = load_tester.private_config\n\nclass Tester(object):\n\n def __init__(self):\n self.__custom_recorder = {}\n self.__ipaddr = None\n self.custom_data = {}\n self.time_step_holder = time_step_holder\n self.hyper_param = {}\n self.strftims = None\n self.private_config = None\n self.last_record_fph_time = None\n self.hyper_param_record = []\n self.metadata_list = []\n self.summary_add_dict = {}\n self._rc_start_time = {}\n self.pkl_dir = None\n self.checkpoint_dir = None\n self.pkl_file = None\n self.results_dir = None\n self.log_dir = None\n self.code_dir = None\n self.saver = None\n self.dl_framework = None\n\n def configure(self, task_name, private_config_path, log_root, run_file=None):\n \"\"\"\n\n :param task_name:\n :param private_config_path:\n :return:\n \"\"\"\n fs = open(private_config_path, encoding=\"UTF-8\")\n self.private_config = yaml.load(fs)\n self.run_file = run_file\n self.task_name = task_name\n self.root = log_root\n logger.info(\"private_config: \")\n self.dl_framework = self.private_config[\"DL_FRAMEWORK\"]\n self.project_root = \"/\".join(private_config_path.split(\"/\")[:-1])\n for k, v in self.private_config.items():\n logger.info(\"k: {}, v: {}\".format(k, v))\n\n def set_hyper_param(self, **argkw):\n \"\"\"\n This method is to record all of hyper parameters to test object.\n\n Place pass your parameters as follow format:\n self.set_hyper_param(param_a=a,param_b=b)\n\n Note: It is invalid to pass a local object to this function.\n\n Parameters\n ----------\n argkw : key-value\n for example: self.set_hyper_param(param_a=a,param_b=b)\n\n \"\"\"\n self.hyper_param = argkw\n\n def update_hyper_param(self, k, v):\n self.hyper_param[k] = v\n\n def clear_record_param(self):\n self.hyper_param_record = []\n\n def log_files_gen(self):\n info = None\n self.record_date = datetime.datetime.now()\n logger.info(\"gen log files for record date : {}\".format(self.record_date))\n if info is None:\n info = self.auto_parse_info()\n info = '&' + info\n self.info = info\n code_dir, _ = self.__create_file_directory(osp.join(self.root, CODE, self.task_name), '', is_file=False)\n log_dir, _ = self.__create_file_directory(osp.join(self.root, LOG, self.task_name), '', is_file=False)\n self.pkl_dir, self.pkl_file = self.__create_file_directory(osp.join(self.root, ARCHIVE_TESTER, self.task_name), '.pkl')\n self.checkpoint_dir, _ = self.__create_file_directory(osp.join(self.root, CHECKPOINT, self.task_name), is_file=False)\n self.results_dir, _ = 
self.__create_file_directory(osp.join(self.root, OTHER_RESULTS, self.task_name), is_file=False)\n self.log_dir = log_dir\n self.code_dir = code_dir\n\n self._init_logger()\n self.serialize_object_and_save()\n self.__copy_source_code(self.run_file, code_dir)\n self._feed_hyper_params_to_tb()\n self.print_log_dir()\n\n def update_log_files_location(self, root):\n self.root = root\n code_dir, _ = self.__create_file_directory(osp.join(self.root, CODE, self.task_name), '', is_file=False)\n log_dir, _ = self.__create_file_directory(osp.join(self.root, LOG, self.task_name), '', is_file=False)\n self.pkl_dir, self.pkl_file = self.__create_file_directory(osp.join(self.root, ARCHIVE_TESTER, self.task_name), '.pkl')\n self.checkpoint_dir, _ = self.__create_file_directory(osp.join(self.root, CHECKPOINT, self.task_name), is_file=False)\n self.results_dir, _ = self.__create_file_directory(osp.join(self.root, OTHER_RESULTS, self.task_name), is_file=False)\n self.log_dir = log_dir\n self.code_dir = code_dir\n self.print_log_dir()\n\n def _init_logger(self):\n self.writer = None\n # logger configure\n logger.info(\"store file %s\" % self.pkl_file)\n logger.configure(self.log_dir, self.private_config[\"LOG_USED\"])\n for fmt in logger.Logger.CURRENT.output_formats:\n if isinstance(fmt, logger.TensorBoardOutputFormat):\n self.writer = fmt.writer\n if \"tensorboard\" not in self.private_config[\"LOG_USED\"]:\n time_step_holder.config(0, 0, tf_log=False)\n\n def log_file_copy(self, source_tester):\n assert isinstance(source_tester, Tester)\n shutil.rmtree(self.checkpoint_dir)\n shutil.copytree(source_tester.checkpoint_dir, self.checkpoint_dir)\n if os.path.exists(source_tester.results_dir):\n shutil.rmtree(self.results_dir)\n shutil.copytree(source_tester.results_dir, self.results_dir)\n else:\n logger.warn(\"[load warning]: can not find results dir\")\n if os.path.exists(source_tester.log_dir):\n shutil.rmtree(self.log_dir)\n shutil.copytree(source_tester.log_dir, self.log_dir)\n else:\n logger.warn(\"[load warning]: can not find log dir\")\n self._init_logger()\n\n def task_gen(self, task_pattern_list):\n return '-'.join(task_pattern_list)\n\n def print_log_dir(self):\n logger.info(\"log dir: {}\".format(self.log_dir))\n logger.info(\"pkl_file: {}\".format(self.pkl_file))\n logger.info(\"checkpoint_dir: {}\".format(self.checkpoint_dir))\n logger.info(\"results_dir: {}\".format(self.results_dir))\n\n @classmethod\n def load_tester(cls, record_date, task_name, log_root):\n logger.info(\"load tester\")\n res_dir, res_file = cls.log_file_finder(record_date, task_name=task_name,\n file_root=osp.join(log_root, ARCHIVE_TESTER),\n log_type='files')\n import dill\n load_tester = dill.load(open(osp.join(res_dir, res_file), 'rb'))\n assert isinstance(load_tester, Tester)\n logger.info(\"update log files' root\")\n load_tester.update_log_files_location(root=log_root)\n return load_tester\n\n\n def add_record_param(self, keys):\n for k in keys:\n if '.' 
in k:\n try:\n sub_k_list = k.split('.')\n v = self.hyper_param[sub_k_list[0]]\n for sub_k in sub_k_list[1:]:\n v = v[sub_k]\n self.hyper_param_record.append(str(k) + '=' + str(v).replace('[', '{').replace(']', '}').replace('/', '_'))\n except KeyError as e:\n print(\"do not include dot ('.') in your hyperparemeter name\")\n else:\n self.hyper_param_record.append(str(k) + '=' + str(self.hyper_param[k]).replace('[', '{').replace(']', '}').replace('/', '_'))\n\n def add_summary_to_logger(self, summary, name='', simple_val=False, freq=20):\n if \"tensorboard\" not in self.private_config[\"LOG_USED\"]:\n logger.info(\"skip adding summary to tb\")\n return\n if name not in self.summary_add_dict:\n self.summary_add_dict[name] = []\n if freq > 0:\n summary_ts = int(self.time_step_holder.get_time() / freq)\n else:\n summary_ts = 0\n if freq <= 0 or summary_ts not in self.summary_add_dict[name]:\n from tensorflow.core.framework import summary_pb2\n summ = summary_pb2.Summary()\n summ.ParseFromString(summary)\n if simple_val:\n list_field = summ.ListFields()\n\n def recursion_util(inp_field):\n if hasattr(inp_field, \"__getitem__\"):\n for inp in inp_field:\n recursion_util(inp)\n elif hasattr(inp_field, 'simple_value'):\n logger.record_tabular(name + '/' + inp_field.tag, inp_field.simple_value)\n else:\n pass\n recursion_util(list_field)\n logger.dump_tabular()\n else:\n self.writer.add_summary(summary, self.time_step_holder.get_time())\n self.writer.flush()\n self.summary_add_dict[name].append(summary_ts)\n\n def _feed_hyper_params_to_tb(self):\n if \"tensorboard\" not in self.private_config[\"LOG_USED\"]:\n logger.info(\"skip feeding hyper-param to tb\")\n return\n\n import tensorflow as tf\n with tf.Session(graph=tf.Graph()) as sess:\n hyperparameters = [tf.convert_to_tensor([k, str(v)]) for k, v in self.hyper_param.items()]\n summary = sess.run(tf.summary.text('hyperparameters', tf.stack(hyperparameters)))\n self.add_summary_to_logger(summary, 'hyperparameters', freq=1)\n\n def sync_log_file(self):\n \"\"\"\n syn_log_file is an automatic synchronization function.\n It will send all log files (e.g., code/**, checkpoint/**, log/**, etc.) to your target server via the FTP protocol.\n To run this function, you should add some configuration on SRG.private_config.py\n\n SEND_LOG_FILE: boolean. 
denotes synchronization or not.\n ftp_server: target server ip address\n username: username of target server\n password: password of target server\n remote_porject_dir: log root of target server, e.g., \"/Project/SRG/SRG/var_gan_imitation/\"\n\n :return:\n \"\"\"\n\n logger.warn(\"sync: start\")\n # ignore_files = self.private_config[\"IGNORE_RULE\"]\n if self.private_config[\"SEND_LOG_FILE\"]:\n from RLA.auto_ftp import FTPHandler\n try:\n ftp = FTPHandler(ftp_server=self.private_config[\"REMOTE_SETTING\"][\"ftp_server\"],\n username=self.private_config[\"REMOTE_SETTING\"][\"username\"],\n password=self.private_config[\"REMOTE_SETTING\"][\"password\"])\n for root, dirs, files in os.walk(self.log_dir):\n suffix = root.split(\"/{}/\".format(LOG))\n assert len(suffix) == 2, \"root should have only one pattern \\\"/log/\\\"\"\n remote_root = osp.join(self.private_config[\"REMOTE_SETTING\"][\"remote_log_root\"], LOG, suffix[1])\n local_root = root\n logger.warn(\"sync {} <- {}\".format(remote_root, local_root))\n for file in files:\n ftp.upload_file(remote_root, local_root, file)\n # for root, dirs, files in os.walk(self.code_dir):\n # remote_root = osp.join(self.private_config.remote_porject_dir, root[3:])\n # local_root = root\n # logger.warn(\"sync {} <- {}\".format(remote_root, local_root))\n # for file in files:\n # ftp.upload_file(remote_root, local_root, file)\n # for root, dirs, files in os.walk(self.checkpoint_dir):\n # for file in files:\n # ftp.upload_file(remote_porject_dir + root[2:], root + '/', file)\n\n logger.warn(\"sync: send success!\")\n except Exception as e:\n logger.warn(\"sending log file failed. {}\".format(e))\n import traceback\n logger.warn(traceback.format_exc())\n\n @classmethod\n def log_file_finder(cls, record_date, task_name='train', file_root='../checkpoint/', log_type='dir'):\n record_date = datetime.datetime.strptime(record_date, '%Y/%m/%d/%H-%M-%S-%f')\n prefix = osp.join(file_root, task_name)\n directory = str(record_date.strftime(\"%Y/%m/%d\"))\n directory = osp.join(prefix, directory)\n file_found = ''\n for root, dirs, files in os.walk(directory):\n if log_type == 'dir':\n search_list = dirs\n elif log_type =='files':\n search_list =files\n else:\n raise NotImplementedError\n for search_item in search_list:\n if search_item.startswith(str(record_date.strftime(\"%H-%M-%S-%f\"))):\n split_dir = search_item.split(' ')\n # self.__ipaddr = split_dir[1]\n info = \" \".join(split_dir[2:])\n logger.info(\"load data: \\n ts {}, \\n ip {}, \\n info {}\".format(split_dir[0], split_dir[1], info))\n file_found = search_item\n break\n return directory, file_found\n\n @property\n def ipaddr(self):\n if self.__ipaddr is None:\n self.__ipaddr = self.__gen_ip()\n return self.__ipaddr\n\n def __gen_ip(self):\n try:\n import socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"1.1.1.1\", 80))\n ip = s.getsockname()[0]\n s.close()\n except Exception as e:\n ip = 'noip'\n return ip\n\n def __copy_source_code(self, run_file, code_dir):\n import shutil\n if self.private_config[\"PROJECT_TYPE\"][\"backup_code_by\"] == 'lib':\n assert os.listdir(code_dir) == []\n os.removedirs(code_dir)\n shutil.copytree(osp.join(self.project_root, self.private_config[\"BACKUP_CONFIG\"][\"lib_dir\"]), code_dir)\n assert run_file is not None, \"you should define the run_file in lib backup mode.\"\n shutil.copy(run_file, code_dir)\n elif self.private_config[\"PROJECT_TYPE\"][\"backup_code_by\"] == 'source':\n for dir_name in 
self.private_config[\"BACKUP_CONFIG\"][\"backup_code_dir\"]:\n shutil.copytree(osp.join(self.project_root, dir_name), osp.join(code_dir, dir_name))\n else:\n raise NotImplementedError\n\n def record_date_to_str(self, record_date):\n return str(record_date.strftime(\"%H-%M-%S-%f\"))\n\n def __create_file_directory(self, prefix, ext='', is_file=True, record_date=None):\n if record_date is None:\n record_date = self.record_date\n directory = str(record_date.strftime(\"%Y/%m/%d\"))\n directory = osp.join(prefix, directory)\n if is_file:\n os.makedirs(directory, exist_ok=True)\n file_name = '{dir}/{timestep} {ip} {info}{ext}'.format(dir=directory,\n timestep=self.record_date_to_str(record_date),\n ip=str(self.ipaddr),\n info=self.info,\n ext=ext)\n else:\n directory = '{dir}/{timestep} {ip} {info}{ext}/'.format(dir=directory,\n timestep=self.record_date_to_str(record_date),\n ip=str(self.ipaddr),\n info=self.info,\n ext=ext)\n os.makedirs(directory, exist_ok=True)\n file_name = ''\n return directory, file_name\n\n def update_fph(self, cum_epochs):\n if self.last_record_fph_time is None:\n self.last_record_fph_time = time.time()\n else:\n cur_time = time.time()\n duration = (cur_time - self.last_record_fph_time) / 60 / 60\n fph = cum_epochs / duration\n logger.record_tabular('fph', fph)\n # self.last_record_fph_time = cur_time\n logger.dump_tabular()\n\n def time_record(self, name):\n assert name not in self._rc_start_time\n self._rc_start_time[name] = time.time()\n\n def time_record_end(self, name):\n end_time = time.time()\n start_time = self._rc_start_time[name]\n logger.record_tabular(\"time_used/{}\".format(name), end_time - start_time)\n logger.info(\"[test] func {0} time used {1:.2f}\".format(name, end_time - start_time))\n del self._rc_start_time[name]\n\n # Saver manger.\n def new_saver(self, max_to_keep, var_prefix=None):\n \"\"\"\n initialize new tf.Saver\n :param var_prefix: we use var_prefix to filter the variables for saving.\n :param max_to_keep:\n :return:\n \"\"\"\n if self.dl_framework == 'tensorflow':\n import tensorflow as tf\n if var_prefix is None:\n var_prefix = ''\n var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, var_prefix)\n logger.info(\"save variable :\")\n for v in var_list:\n logger.info(v)\n self.saver = tf.train.Saver(var_list=var_list, max_to_keep=max_to_keep, filename=self.checkpoint_dir, save_relative_paths=True)\n elif self.dl_framework == 'pytorch':\n self.max_to_keep = max_to_keep\n self.checkpoint_keep_list = []\n else:\n raise NotImplementedError\n\n def save_checkpoint(self, model_dict=None):\n if self.dl_framework == 'tensorflow':\n import tensorflow as tf\n iter = self.time_step_holder.get_time()\n cpt_name = osp.join(self.checkpoint_dir, 'checkpoint')\n logger.info(\"save checkpoint to \", cpt_name, iter)\n self.saver.save(tf.get_default_session(), cpt_name, global_step=iter)\n elif self.dl_framework == 'pytorch':\n import torch\n iter = self.time_step_holder.get_time()\n torch.save(model_dict, f=tester.checkpoint_dir + \"checkpoint-{}.pt\".format(iter))\n self.checkpoint_keep_list.append(iter)\n if len(self.checkpoint_keep_list) > self.max_to_keep:\n for i in range(len(self.checkpoint_keep_list) - self.max_to_keep):\n rm_ckp_name = tester.checkpoint_dir + \"checkpoint-{}.pt\".format(self.checkpoint_keep_list[i])\n logger.info(\"rm the older checkpoint\", rm_ckp_name)\n os.remove(rm_ckp_name)\n self.checkpoint_keep_list = self.checkpoint_keep_list[-1 * self.max_to_keep:]\n else:\n raise NotImplementedError\n\n def load_checkpoint(self):\n 
if self.dl_framework == 'tensorflow':\n # TODO: load with variable scope.\n import tensorflow as tf\n cpt_name = osp.join(self.checkpoint_dir)\n logger.info(\"load checkpoint {}\".format(cpt_name))\n ckpt_path = tf.train.latest_checkpoint(cpt_name)\n self.saver.restore(tf.get_default_session(), ckpt_path)\n max_iter = ckpt_path.split('-')[-1]\n self.time_step_holder.set_time(max_iter)\n return int(max_iter), None\n elif self.dl_framework == 'pytorch':\n import torch\n return self.checkpoint_keep_list[-1], torch.load(tester.checkpoint_dir + \"checkpoint-{}.pt\".format(self.checkpoint_keep_list[-1]))\n\n def auto_parse_info(self):\n return '&'.join(self.hyper_param_record)\n\n\n def add_graph(self, sess):\n assert self.writer is not None\n self.writer.add_graph(sess.graph)\n\n # --- custom data manager --\n def add_custom_data(self, key, data, dtype=list, max_len=-1):\n if key not in self.custom_data:\n if issubclass(dtype, deque):\n assert max_len > 0\n\n self.custom_data[key] = deque(maxlen=max_len)\n self.custom_data[key].append(data)\n elif issubclass(dtype, list):\n self.custom_data[key] = [data]\n else:\n self.custom_data[key] = data\n else:\n if issubclass(dtype, list) or issubclass(dtype, deque):\n self.custom_data[key].append(data)\n else:\n self.custom_data[key] = data\n\n def print_custom_data(self, key, prefix=''):\n assert key in self.custom_data\n import numpy as np\n mean_val = np.mean(self.custom_data[key])\n logger.record_tabular(prefix + key, mean_val)\n\n def clear_custom_data(self, key):\n if key in self.custom_data:\n del self.custom_data[key]\n else:\n logger.warn(\"[WARN] key [{}], not in custom_data\".format(key))\n\n def get_custom_data(self, key):\n if key not in self.custom_data:\n return None\n else:\n return self.custom_data[key]\n\n def serialize_object_and_save(self):\n \"\"\"\n This method is to save test object to a dill.\n This method will be call every time you call add_custom_record or other record function like self.check_and_test\n \"\"\"\n # remove object which can is not serializable\n writer = self.writer\n self.writer = None\n saver = self.saver\n self.saver = None\n with open(self.pkl_file, 'wb') as f:\n dill.dump(self, f)\n self.writer = writer\n self.saver = saver\n\n def print_args(self):\n sort_list = sorted(self.hyper_param.items(), key=lambda i: i[0])\n for key, value in sort_list:\n logger.info(\"key: %s, value: %s\" % (key, value))\n\n def print_large_memory_variable(self):\n import sys\n large_mermory_dict = {}\n\n def sizeof_fmt(num, suffix='B'):\n for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:\n if abs(num) < 1024.0:\n return \"%3.1f %s%s\" % (num, unit, suffix), unit\n num /= 1024.0\n return \"%.1f %s%s\" % (num, 'Yi', suffix), 'Yi'\n\n for name, size in sorted(((name, sys.getsizeof(value)) for name, value in locals().items()),\n key=lambda x: -x[1])[:10]:\n size_str, fmt_type = sizeof_fmt(size)\n if fmt_type in ['', 'Ki', 'Mi']:\n continue\n logger.info(\"{:>30}: {:>8}\".format(name, size_str))\n large_mermory_dict[str(name)] = size_str\n if large_mermory_dict != {}:\n summary = self.dict_to_table_text_summary(large_mermory_dict, 'large_memory')\n self.add_summary_to_logger(summary, 'large_memory')\n\n def dict_to_table_text_summary(self, input_dict, name):\n import tensorflow as tf\n with tf.Session(graph=tf.Graph()) as sess:\n to_tensor = [tf.convert_to_tensor([k, str(v)]) for k, v in input_dict.items()]\n return sess.run(tf.summary.text(name, tf.stack(to_tensor)))\n\n\ntester = Tester()\n"
] | [
[
"tensorflow.stack",
"tensorflow.get_collection",
"tensorflow.get_default_session",
"tensorflow.Graph",
"tensorflow.train.latest_checkpoint",
"tensorflow.train.Saver",
"tensorflow.core.framework.summary_pb2.Summary",
"numpy.mean"
]
] |
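Tester.serialize_object_and_save above uses a common pattern: temporarily detach members that should not be pickled (the TF summary writer and saver), dump the object with dill, then reattach them. Below is a generic sketch with a made-up Holder class; the try/finally is a small hardening over the original, which would leave the member detached if dill.dump raised.

import dill

class Holder:
    def __init__(self):
        self.state = {"step": 10}
        self.writer = open("holder.log", "w")        # runtime-only resource, not worth serializing

    def save(self, path):
        writer, self.writer = self.writer, None      # detach before pickling
        try:
            with open(path, "wb") as f:
                dill.dump(self, f)
        finally:
            self.writer = writer                     # restore even if dump fails

h = Holder()
h.save("holder.pkl")
with open("holder.pkl", "rb") as f:
    restored = dill.load(f)                          # restored.writer is None; reattach as needed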
YukiHata-ITS/uda_nd013-c1-vision-starter | [
"4785970ae56a21905d63ae429b3a6ee717804668"
] | [
"workspace/inference_video.py"
] | [
"import argparse\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom matplotlib import animation\n\nfrom object_detection.builders.dataset_builder import build as build_dataset\nfrom object_detection.utils.config_util import get_configs_from_pipeline_file\nfrom object_detection.utils.label_map_util import create_category_index_from_labelmap\nfrom object_detection.utils import visualization_utils as viz_utils\n\nfrom utils import get_module_logger\n\n\ndef main(labelmap_path, model_path, tf_record_path, config_path, output_path):\n    \"\"\"\n    Use a model and a tf record file and create a mp4 video\n    args:\n    - labelmap_path [str]: path to labelmap file\n    - model_path [str]: path to exported model \n    - tf_record_path [str]: path to tf record file to visualize\n    - config_path [str]: path to config file\n    - output_path [str]: path to mp4 file\n\n    Save the results as mp4 file\n    \"\"\"\n    # load label map\n    category_index = create_category_index_from_labelmap(labelmap_path,\n                                                         use_display_name=True)\n\n    # Load saved model and build the detection function\n    logger.info(f'Loading model from {model_path}')\n    detect_fn = tf.saved_model.load(model_path)\n\n    # open config file\n    logger.info(f'Loading config from {config_path}')\n    configs = get_configs_from_pipeline_file(config_path)\n    eval_config = configs['eval_config']\n    eval_input_config = configs['eval_input_config']\n    model_config = configs['model']\n\n    # update the eval config and build the dataset\n    eval_input_config.tf_record_input_reader.input_path[:] = [tf_record_path]\n    dataset = build_dataset(eval_input_config)\n\n    # here we infer on the entire dataset\n    images = []\n    logger.info(f'Inference on {tf_record_path}')\n    for idx, batch in enumerate(dataset):\n        # log progress every 50 steps\n        if idx % 50 == 0:\n            logger.info(f'Step: {idx}')\n        # add new axis and feed into model \n        input_tensor = batch['image']\n        image_np = input_tensor.numpy().astype(np.uint8)\n        input_tensor = input_tensor[tf.newaxis, ...]\n\n        detections = detect_fn(input_tensor)\n\n        # tensor -> numpy arr, remove one dimension\n        num_detections = int(detections.pop('num_detections'))\n        detections = {key: value[0, ...].numpy()\n                      for key, value in detections.items()}\n        detections['num_detections'] = num_detections\n\n        # detection_classes should be ints.\n        detections['detection_classes'] = detections['detection_classes'].astype(np.int64)\n\n        image_np_with_detections = image_np.copy()\n        viz_utils.visualize_boxes_and_labels_on_image_array(\n            image_np_with_detections,\n            detections['detection_boxes'],\n            detections['detection_classes'],\n            detections['detection_scores'],\n            category_index,\n            use_normalized_coordinates=True,\n            max_boxes_to_draw=200,\n            min_score_thresh=.30,\n            agnostic_mode=False)\n        images.append(image_np_with_detections)\n\n    # now we can create the animation\n    f = plt.figure()\n    f.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)\n    ax = plt.subplot(111)\n    ax.axis('off')\n    im_obj = ax.imshow(images[0])\n\n    def animate(idx):\n        image = images[idx]\n        im_obj.set_data(image)\n\n    anim = animation.FuncAnimation(f, animate, frames=len(images))\n    anim.save(output_path, fps=5, dpi=300)\n\n\nif __name__ == \"__main__\": \n    logger = get_module_logger(__name__)\n\n    parser = argparse.ArgumentParser(description='Create video')\n    parser.add_argument('--labelmap_path', required=True, type=str,\n                        help='path to the label map')\n    parser.add_argument('--model_path', required=True, type=str,\n                        help='path to the saved model 
folder')\n parser.add_argument('--tf_record_path', required=True, type=str,\n help='path to the tf record file')\n parser.add_argument('--config_path', required=False, type=str,\n default='pipeline.config', \n help='path to the config file')\n parser.add_argument('--output_path', required=False, type=str, \n default='animation.mp4', \n help='path of the saved file')\n args = parser.parse_args()\n main(args.labelmap_path, \n args.model_path, \n args.tf_record_path, \n args.config_path, \n args.output_path)\n"
] | [
[
"tensorflow.saved_model.load",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot",
"matplotlib.animation.FuncAnimation"
]
] |
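The animation step in inference_video.py above is matplotlib's FuncAnimation mutating a single imshow artist per frame. Below is a self-contained sketch with synthetic frames in place of detection overlays; saving to mp4 assumes an ffmpeg writer is available to matplotlib.

import numpy as np
import matplotlib
matplotlib.use("Agg")                      # select a headless backend before importing pyplot
import matplotlib.pyplot as plt
from matplotlib import animation

frames = [np.random.rand(64, 64) for _ in range(30)]   # stand-ins for annotated images

fig = plt.figure()
ax = plt.subplot(111)
ax.axis("off")
im = ax.imshow(frames[0])

def animate(i):
    im.set_data(frames[i])                 # update the existing artist in place

anim = animation.FuncAnimation(fig, animate, frames=len(frames))
anim.save("animation.mp4", fps=5)          # requires ffmpeg on PATH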
periakiva/finding_berries | [
"1dfc7cf00c384321e39872921051dc9535355e53"
] | [
"models/fpn/fpn_decoder.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Conv3x3GNReLU(nn.Module):\n def __init__(self, in_channels, out_channels, upsample=False):\n super().__init__()\n self.upsample = upsample\n self.block = nn.Sequential(\n nn.Conv2d(\n in_channels, out_channels, (3, 3), stride=1, padding=1, bias=False\n ),\n nn.GroupNorm(32, out_channels),\n nn.ReLU(inplace=True),\n )\n\n def forward(self, x):\n x = self.block(x)\n if self.upsample:\n x = F.interpolate(x, scale_factor=2, mode=\"bilinear\", align_corners=True)\n return x\n\n\nclass FPNBlock(nn.Module):\n def __init__(self, pyramid_channels, skip_channels):\n super().__init__()\n self.skip_conv = nn.Conv2d(skip_channels, pyramid_channels, kernel_size=1)\n\n def forward(self, x, skip=None):\n x = F.interpolate(x, scale_factor=2, mode=\"nearest\")\n skip = self.skip_conv(skip)\n x = x + skip\n return x\n\n\nclass SegmentationBlock(nn.Module):\n def __init__(self, in_channels, out_channels, n_upsamples=0):\n super().__init__()\n\n blocks = [Conv3x3GNReLU(in_channels, out_channels, upsample=bool(n_upsamples))]\n\n if n_upsamples > 1:\n for _ in range(1, n_upsamples):\n blocks.append(Conv3x3GNReLU(out_channels, out_channels, upsample=True))\n\n self.block = nn.Sequential(*blocks)\n\n def forward(self, x):\n return self.block(x)\n\n\nclass MergeBlock(nn.Module):\n def __init__(self, policy):\n super().__init__()\n if policy not in [\"add\", \"cat\"]:\n raise ValueError(\n \"`merge_policy` must be one of: ['add', 'cat'], got {}\".format(\n policy\n )\n )\n self.policy = policy\n\n def forward(self, x):\n if self.policy == 'add':\n return sum(x)\n elif self.policy == 'cat':\n return torch.cat(x, dim=1)\n else:\n raise ValueError(\n \"`merge_policy` must be one of: ['add', 'cat'], got {}\".format(self.policy)\n )\n\n\nclass FPNDecoder(nn.Module):\n def __init__(\n self,\n encoder_channels,\n encoder_depth=5,\n pyramid_channels=256,\n segmentation_channels=128,\n dropout=0.2,\n merge_policy=\"add\",\n ):\n super().__init__()\n\n self.out_channels = segmentation_channels if merge_policy == \"add\" else segmentation_channels * 4\n if encoder_depth < 3:\n raise ValueError(\"Encoder depth for FPN decoder cannot be less than 3, got {}.\".format(encoder_depth))\n\n encoder_channels = encoder_channels[::-1]\n encoder_channels = encoder_channels[:encoder_depth + 1]\n\n self.p5 = nn.Conv2d(encoder_channels[0], pyramid_channels, kernel_size=1)\n self.p4 = FPNBlock(pyramid_channels, encoder_channels[1])\n self.p3 = FPNBlock(pyramid_channels, encoder_channels[2])\n self.p2 = FPNBlock(pyramid_channels, encoder_channels[3])\n \n self.seg_blocks = nn.ModuleList([\n SegmentationBlock(pyramid_channels, segmentation_channels, n_upsamples=n_upsamples)\n for n_upsamples in [3, 2, 1, 0]\n ])\n\n self.merge = MergeBlock(merge_policy)\n self.dropout = nn.Dropout2d(p=dropout, inplace=True)\n\n def forward(self, *features):\n c2, c3, c4, c5 = features[-4:]\n\n p5 = self.p5(c5)\n p4 = self.p4(p5, c4)\n p3 = self.p3(p4, c3)\n p2 = self.p2(p3, c2)\n\n feature_pyramid = [seg_block(p) for seg_block, p in zip(self.seg_blocks, [p5, p4, p3, p2])]\n x = self.merge(feature_pyramid)\n x = self.dropout(x)\n\n return x"
] | [
[
"torch.nn.GroupNorm",
"torch.nn.Dropout2d",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.cat",
"torch.nn.functional.interpolate"
]
] |
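A quick shape-check sketch for the FPNDecoder above. The import path assumes the repo root is on sys.path, and the ResNet-50-style channel counts and spatial sizes are illustrative.

import torch
from models.fpn.fpn_decoder import FPNDecoder  # assumes the repo root is importable

decoder = FPNDecoder(
    encoder_channels=(3, 64, 256, 512, 1024, 2048),  # shallow -> deep; reversed internally
    pyramid_channels=256,
    segmentation_channels=128,
    merge_policy="add",
)

# forward() consumes the last four features (c2..c5), here at strides 4/8/16/32 of a 256px input.
feats = [
    torch.randn(1, 3, 256, 256),
    torch.randn(1, 64, 128, 128),
    torch.randn(1, 256, 64, 64),    # c2
    torch.randn(1, 512, 32, 32),    # c3
    torch.randn(1, 1024, 16, 16),   # c4
    torch.randn(1, 2048, 8, 8),     # c5
]
out = decoder(*feats)
print(out.shape)  # torch.Size([1, 128, 64, 64]); would be 128*4 channels with merge_policy="cat"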
cbigit/unet | [
"89d5576624620293419f1fa8fc16b47219dcad0e"
] | [
"2D/plot_tf_inference_examples.py"
] | [
"#\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: EPL-2.0\n#\n\n\"\"\"\nTakes a trained model and performs inference on a few validation examples.\n\"\"\"\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\" # Get rid of the AVX, SSE warnings\n\nimport numpy as np\nimport tensorflow as tf\nimport time\nfrom tensorflow import keras as K\nimport settings\nimport argparse\nfrom dataloader import DatasetGenerator\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.use(\"Agg\")\n\n\nparser = argparse.ArgumentParser(\n description=\"TensorFlow Inference example for trained 2D U-Net model on BraTS.\",\n add_help=True, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument(\"--data_path\", default=settings.DATA_PATH,\n help=\"the path to the data\")\nparser.add_argument(\"--output_path\", default=settings.OUT_PATH,\n help=\"the folder to save the model and checkpoints\")\nparser.add_argument(\"--inference_filename\", default=settings.INFERENCE_FILENAME,\n help=\"the TensorFlow inference model filename\")\nparser.add_argument(\"--use_pconv\",help=\"use partial convolution based padding\",\n action=\"store_true\",\n default=settings.USE_PCONV)\nparser.add_argument(\"--output_pngs\", default=\"inference_examples\",\n help=\"the directory for the output prediction pngs\")\n\nparser.add_argument(\"--intraop_threads\", default=settings.NUM_INTRA_THREADS,\n type=int, help=\"Number of intra-op-parallelism threads\")\nparser.add_argument(\"--interop_threads\", default=settings.NUM_INTER_THREADS,\n type=int, help=\"Number of inter-op-parallelism threads\")\nparser.add_argument(\"--crop_dim\", default=settings.CROP_DIM,\n type=int, help=\"Crop dimension for images\")\nparser.add_argument(\"--seed\", default=settings.SEED,\n type=int, help=\"Random seed\")\n\nargs = parser.parse_args()\n\ndef test_intel_tensorflow():\n \"\"\"\n Check if Intel version of TensorFlow is installed\n \"\"\"\n import tensorflow as tf\n \n print(\"We are using Tensorflow version {}\".format(tf.__version__))\n \n major_version = int(tf.__version__.split(\".\")[0])\n if major_version >= 2:\n from tensorflow.python import _pywrap_util_port\n print(\"Intel-optimizations (DNNL) enabled:\", _pywrap_util_port.IsMklEnabled())\n else:\n print(\"Intel-optimizations (DNNL) enabled:\", tf.pywrap_tensorflow.IsMklEnabled()) \n\ntest_intel_tensorflow()\n\n\ndef calc_dice(target, prediction, smooth=0.0001):\n \"\"\"\n Sorenson Dice\n \\frac{ 2 \\times \\left | T \\right | \\cap \\left | P \\right |}{ \\left | T \\right | + \\left | P \\right | }\n where T is ground truth (target) mask and P is the prediction mask\n \"\"\"\n prediction = np.round(prediction)\n\n numerator = 2.0 * np.sum(target * prediction) + smooth\n denominator = np.sum(target) + np.sum(prediction) + smooth\n coef = numerator / denominator\n\n return coef\n\n\ndef calc_soft_dice(target, prediction, smooth=0.0001):\n \"\"\"\n Sorensen (Soft) 
Dice coefficient - Don't round predictions\n \"\"\"\n numerator = 2.0 * np.sum(target * prediction) + smooth\n denominator = np.sum(target) + np.sum(prediction) + smooth\n coef = numerator / denominator\n\n return coef\n\n\ndef plot_results(ds, idx, png_directory):\n \n dt = ds.get_dataset().take(1).as_numpy_iterator() # Get some examples (use different seed for different samples)\n\n plt.figure(figsize=(10,10))\n\n for img, msk in dt:\n\n plt.subplot(1, 3, 1)\n plt.imshow(img[idx, :, :, 0], cmap=\"bone\", origin=\"lower\")\n plt.title(\"MRI {}\".format(idx), fontsize=20)\n\n plt.subplot(1, 3, 2)\n plt.imshow(msk[idx, :, :], cmap=\"bone\", origin=\"lower\")\n plt.title(\"Ground truth\", fontsize=20)\n\n plt.subplot(1, 3, 3)\n\n print(\"Index {}: \".format(idx), end=\"\")\n \n # Predict using the TensorFlow model\n start_time = time.time()\n prediction = model.predict(img[[idx]])\n print(\"Elapsed time = {:.4f} msecs, \".format(1000.0*(time.time()-start_time)), end=\"\")\n \n plt.imshow(prediction[0,:,:,0], cmap=\"bone\", origin=\"lower\")\n dice_coef = calc_dice(msk[idx], prediction)\n print(\"Dice coefficient = {:.4f}, \".format(dice_coef), end=\"\")\n plt.title(\"Prediction\\nDice = {:.4f}\".format(dice_coef), fontsize=20)\n\n save_name = os.path.join(png_directory, \"prediction_tf_{}.png\".format(idx))\n print(\"Saved as: {}\".format(save_name))\n plt.savefig(save_name)\n \nif __name__ == \"__main__\":\n\n model_filename = os.path.join(args.output_path, args.inference_filename)\n\n ds_testing = DatasetGenerator(os.path.join(args.data_path, \"testing/*.npz\"), \n crop_dim=args.crop_dim, \n batch_size=128, \n augment=False, \n seed=args.seed)\n # Load model\n if args.use_pconv:\n from model_pconv import unet\n unet_model = unet(use_pconv=True)\n else:\n from model import unet\n unet_model = unet()\n \n \n model = unet_model.load_model(model_filename)\n\n # Create output directory for images\n png_directory = args.output_pngs\n if not os.path.exists(png_directory):\n os.makedirs(png_directory)\n\n # Plot some results\n # The plots will be saved to the png_directory\n # Just picking some random samples.\n indicies_testing = [11,17,25,56,89,101,119]\n\n for idx in indicies_testing:\n plot_results(ds_testing, idx, png_directory)\n"
] | [
[
"tensorflow.python._pywrap_util_port.IsMklEnabled",
"tensorflow.pywrap_tensorflow.IsMklEnabled",
"numpy.sum",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.imshow",
"matplotlib.use",
"numpy.round",
"tensorflow.__version__.split"
]
] |
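A worked example of the calc_dice arithmetic above, with the smoothing terms dropped for clarity: one true-positive pixel against two target pixels and one predicted pixel gives Dice = 2*1 / (2 + 1).

import numpy as np

target = np.array([1, 1, 0, 0])
prediction = np.array([0.9, 0.2, 0.1, 0.0])      # rounds to [1, 0, 0, 0]

p = np.round(prediction)
dice = 2.0 * np.sum(target * p) / (np.sum(target) + np.sum(p))
print(dice)  # 0.666...; calc_dice adds smooth=0.0001 to numerator and denominator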
willfrey/ray | [
"288a81b42ef0186ab4db33b30191614a7bdb69f6"
] | [
"python/ray/workflow/tests/test_checkpoint_2.py"
] | [
"import ray\nimport time\nimport pytest\nfrom ray.tests.conftest import * # noqa\n\nimport numpy as np\nfrom ray import workflow\nfrom ray.workflow.tests import utils\nfrom ray.exceptions import RaySystemError\n\n\nSIZE = 2 ** 15\n\n\[email protected]\ndef checkpoint_dag(checkpoint):\n @ray.remote\n def large_input():\n return np.arange(SIZE)\n\n @ray.remote\n def identity(x):\n if not utils.check_global_mark():\n import os\n\n os.kill(os.getpid(), 9)\n return x\n\n @ray.remote\n def average(x):\n return np.mean(x)\n\n x = large_input.options(**workflow.options(checkpoint=checkpoint)).bind()\n y = identity.options(**workflow.options(checkpoint=checkpoint)).bind(x)\n return workflow.continuation(average.bind(y))\n\n\ndef test_checkpoint_dag_recovery_skip(workflow_start_regular_shared):\n utils.unset_global_mark()\n\n start = time.time()\n with pytest.raises(RaySystemError):\n workflow.create(\n checkpoint_dag.options(**workflow.options(checkpoint=False)).bind(False)\n ).run(workflow_id=\"checkpoint_skip_recovery\")\n run_duration_skipped = time.time() - start\n\n utils.set_global_mark()\n\n start = time.time()\n recovered = ray.get(workflow.resume(\"checkpoint_skip_recovery\"))\n recover_duration_skipped = time.time() - start\n assert np.isclose(recovered, np.arange(SIZE).mean())\n\n print(\n f\"[skipped] run_duration = {run_duration_skipped}, \"\n f\"recover_duration = {recover_duration_skipped}\"\n )\n\n\ndef test_checkpoint_dag_recovery_partial(workflow_start_regular_shared):\n utils.unset_global_mark()\n\n start = time.time()\n with pytest.raises(RaySystemError):\n workflow.create(checkpoint_dag.bind(False)).run(\n workflow_id=\"checkpoint_partial_recovery\"\n )\n run_duration_partial = time.time() - start\n\n utils.set_global_mark()\n\n start = time.time()\n recovered = ray.get(workflow.resume(\"checkpoint_partial_recovery\"))\n recover_duration_partial = time.time() - start\n assert np.isclose(recovered, np.arange(SIZE).mean())\n print(\n f\"[partial] run_duration = {run_duration_partial}, \"\n f\"recover_duration = {recover_duration_partial}\"\n )\n\n\ndef test_checkpoint_dag_recovery_whole(workflow_start_regular_shared):\n utils.unset_global_mark()\n\n start = time.time()\n with pytest.raises(RaySystemError):\n workflow.create(checkpoint_dag.bind(True)).run(\n workflow_id=\"checkpoint_whole_recovery\"\n )\n run_duration_whole = time.time() - start\n\n utils.set_global_mark()\n\n start = time.time()\n recovered = ray.get(workflow.resume(\"checkpoint_whole_recovery\"))\n recover_duration_whole = time.time() - start\n assert np.isclose(recovered, np.arange(SIZE).mean())\n\n print(\n f\"[whole] run_duration = {run_duration_whole}, \"\n f\"recover_duration = {recover_duration_whole}\"\n )\n\n\[email protected](\n reason=(\n \"Currently it is not clear how and if we need to check\"\n \"side effects of skipping checkpointing, e.g., the\"\n \"violation of exactly-once execution guarantee of workflow.\"\n )\n)\ndef test_checkpoint_dag_validation(workflow_start_regular):\n @ray.remote\n def identity(x):\n return x\n\n @ray.remote\n def average(x):\n return np.mean(x)\n\n @workflow.step\n def valid_checkpoint_dag_1():\n y = identity.options(checkpoint=False).step(42)\n return average.options(checkpoint=True).step(y)\n\n @workflow.step\n def invalid_checkpoint_dag_1():\n y = identity.options(checkpoint=True).step(42)\n return average.options(checkpoint=True).step(y)\n\n @workflow.step\n def invalid_checkpoint_dag_2():\n y = valid_checkpoint_dag_1.options(checkpoint=False).step()\n return 
average.options(checkpoint=True).step(y)\n\n valid_checkpoint_dag_1.options(checkpoint=False).step().run()\n # check invalid configuration\n with pytest.raises(RaySystemError):\n invalid_checkpoint_dag_1.options(checkpoint=False).step().run()\n # check invalid configuration\n with pytest.raises(RaySystemError):\n invalid_checkpoint_dag_2.options(checkpoint=False).step().run()\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(pytest.main([\"-v\", __file__]))\n"
] | [
[
"numpy.arange",
"numpy.mean"
]
] |
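The recovery tests above share a fail-once pattern: the task kills itself until a global mark is set, then workflow.resume completes from the checkpoint. Below is a Ray-free sketch of just that control flow; the sentinel path is hypothetical and stands in for utils.set_global_mark/check_global_mark.

import os
import numpy as np

MARK = "/tmp/global_mark"                  # hypothetical sentinel file

def identity(x):
    if not os.path.exists(MARK):           # stands in for utils.check_global_mark()
        raise RuntimeError("simulated crash before the mark is set")
    return x

def pipeline():
    return np.mean(identity(np.arange(2 ** 15)))

try:
    pipeline()                              # first run dies, like the os.kill(..., 9) above
except RuntimeError:
    open(MARK, "w").close()                 # stands in for utils.set_global_mark()

assert np.isclose(pipeline(), (2 ** 15 - 1) / 2)   # mean of arange(SIZE) is 16383.5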
jaayeon/emotion_classification | [
"5514360803b80f1b3dc607c077a14785d9a01669"
] | [
"Resnet_GRU/data_loader.py"
] | [
"#!/usr/bin/python\r\n# encoding: utf-8\r\n\r\nimport glob\r\nimport os\r\nimport random\r\nimport torch\r\nimport numpy as np\r\nfrom torch.utils import data\r\nimport cv2\r\nfrom PIL import Image\r\nfrom utils import *\r\nfrom torchvision import transforms\r\n\r\nclass FER(data.Dataset):\r\n    def __init__(self, opt, mode):\r\n\r\n        self.opt = opt\r\n        self.mode = mode\r\n        self.img_size = opt.img_size\r\n        self.length = opt.length\r\n        self.iter = opt.iter\r\n        # self.img_list = glob.glob(os.path.join(opt.train_dir,'**', '*.jpg'), recursive=True)\r\n        if self.mode == 'train':\r\n            img_list_0 = glob.glob(os.path.join(opt.train_dir, 'Not_Understand', '*.jpg'))\r\n            img_list_1 = glob.glob(os.path.join(opt.train_dir, 'Neutral', '*.jpg'))\r\n            img_list_2 = glob.glob(os.path.join(opt.train_dir, 'Understand', '*.jpg'))\r\n        elif self.mode == 'valid':\r\n            img_list_0 = glob.glob(os.path.join(opt.valid_dir, 'Not_Understand', '*.jpg'))\r\n            img_list_1 = glob.glob(os.path.join(opt.valid_dir, 'Neutral', '*.jpg'))\r\n            img_list_2 = glob.glob(os.path.join(opt.valid_dir, 'Understand', '*.jpg'))\r\n        self.img_list_0 = sorted(img_list_0)\r\n        self.img_list_1 = sorted(img_list_1)\r\n        self.img_list_2 = sorted(img_list_2)\r\n        self.len0 = len(self.img_list_0)\r\n        self.len1 = len(self.img_list_1)\r\n        self.len2 = len(self.img_list_2)\r\n        # print('Number of each class images >> len0 : {}, len1 : {}, len2 : {}'.format(self.len0, self.len1, self.len2))\r\n\r\n\r\n    def __getitem__(self, index):  # L,C,H,W\r\n\r\n        r = np.random.randint(9)\r\n        img_path = []\r\n        seq = np.zeros((self.length, 1, self.img_size, self.img_size))\r\n\r\n        if self.mode == 'train' or self.mode == 'valid':\r\n            if (r % 3) == 0: img_list = self.img_list_0; num = self.len0\r\n            elif (r % 3) == 1: img_list = self.img_list_1; num = self.len1\r\n            else: img_list = self.img_list_2; num = self.len2\r\n\r\n            idx = random.sample(range(num), self.length)\r\n            for i, img_num in enumerate(idx):\r\n                img_path.append(img_list[img_num])\r\n                img = cv2.imread(img_list[img_num], cv2.IMREAD_GRAYSCALE)\r\n                aug_img = self.transform(img)\r\n                # print('aug_img.shape :', aug_img.shape)\r\n                seq[i, :, :, :] = aug_img\r\n\r\n            seq = torch.from_numpy(seq).float()\r\n            label = int(img_path[0].split('_')[-1].split('.')[0])  # 0-not understand, 1-neutral, 2-understand\r\n            label = torch.LongTensor([label])\r\n            # print('FER/ img_path : {}, label : {}'.format(img_path[0].split('\\\\')[-1], label))\r\n            return seq, label\r\n\r\n        else:\r\n            img = self.get_real_data()\r\n            return img\r\n\r\n    def __len__(self):\r\n        if self.mode == 'train':\r\n            if self.iter:\r\n                return self.iter\r\n            else:\r\n                return int(self.len0 + self.len1 + self.len2)\r\n        elif self.mode == 'valid':  # valid always sees all the data, regardless of iter\r\n            return int(self.len0 + self.len1 + self.len2)\r\n\r\n\r\n\r\n    def transform(self, img):\r\n        ndim = img.ndim\r\n        # print('data_loader img.shape : ', img.shape)\r\n        if ndim == 2:\r\n            img = np.expand_dims(img, axis=0)\r\n        else:\r\n            h, w, c = img.shape\r\n            if c == 3:\r\n                # color to gray\r\n                pass\r\n        aug_img = self.augment(img)\r\n        # print('data_loader aug_img.shape : ', aug_img.shape)\r\n        return aug_img\r\n\r\n\r\n    def augment(self, img, hflip=True, rot=True):  # c,h,w\r\n        hflip = hflip and random.random() < 0.5\r\n        vflip = rot and random.random() < 0.5\r\n        rot90 = rot and random.random() < 0.5\r\n\r\n        if hflip: img = img[:, :, ::-1].copy()\r\n        if vflip: img = img[:, ::-1, :].copy()\r\n        if rot90: img = img.transpose(0, 2, 1).copy()\r\n\r\n        return img\r\n\r\n\r\n\r\n    #for 
real-time\r\n def face_detection(self):\r\n #face detect\r\n #to gray scale\r\n pass\r\n\r\n #for real-time\r\n def get_real_data(self):\r\n img_shape=(1,self.img_size, self.img_size)\r\n crop_img=self.face_detection()\r\n #resize\r\n resize_img=np.resize(crop_img, img_shape)\r\n aug_img = self.augment(resize_img)\r\n return aug_img\r\n\r\n\r\n\r\ndef get_dataloader(opt, mode):\r\n\r\n dataset = FER(opt, mode)\r\n length = len(dataset)\r\n\r\n # print('Length of {} dataloader : {}'.format(opt.mode, length))\r\n if mode == 'train':\r\n dataloader = data.DataLoader(dataset=dataset,\r\n batch_size=1,\r\n shuffle=True,\r\n pin_memory=True,\r\n num_workers=opt.num_workers)\r\n elif mode == 'valid':\r\n dataloader = data.DataLoader(dataset=dataset,\r\n batch_size=opt.batch_size,\r\n shuffle=False,\r\n pin_memory=True,\r\n num_workers=opt.num_workers)\r\n \r\n return dataloader"
] | [
[
"torch.utils.data.DataLoader",
"numpy.zeros",
"numpy.resize",
"numpy.expand_dims",
"torch.from_numpy",
"torch.LongTensor",
"numpy.random.randint"
]
] |
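The flips in FER.augment above are plain NumPy slice reversals on a (C, H, W) array. Below is a deterministic sketch with the coin flips replaced by fixed flags so the effect is visible; note that transpose(0, 2, 1) swaps H and W rather than performing a true 90-degree rotation.

import numpy as np

img = np.arange(6).reshape(1, 2, 3)        # (C=1, H=2, W=3): [[0 1 2], [3 4 5]]

hflip, vflip, rot90 = True, False, False   # fixed in place of random.random() < 0.5
out = img
if hflip:
    out = out[:, :, ::-1].copy()           # reverse the width axis
if vflip:
    out = out[:, ::-1, :].copy()           # reverse the height axis
if rot90:
    out = out.transpose(0, 2, 1).copy()    # swap H and W (a transpose, not a true rot90)

print(out[0])                              # [[2 1 0], [5 4 3]]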
pinsleepe/great_expectations | [
"37329c906a5a159b54257dbcd897850177eecbcc"
] | [
"tests/execution_engine/test_sparkdf_execution_engine.py"
] | [
"import datetime\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.core.batch_spec import (\n AzureBatchSpec,\n GCSBatchSpec,\n PathBatchSpec,\n RuntimeDataBatchSpec,\n S3BatchSpec,\n)\nfrom great_expectations.execution_engine import SparkDFExecutionEngine\nfrom great_expectations.execution_engine.execution_engine import MetricDomainTypes\nfrom great_expectations.expectations.row_conditions import (\n RowCondition,\n RowConditionParserType,\n)\nfrom great_expectations.self_check.util import build_spark_engine\nfrom great_expectations.validator.metric_configuration import MetricConfiguration\nfrom tests.expectations.test_util import get_table_columns_metric\nfrom tests.test_utils import create_files_in_directory\n\ntry:\n pyspark = pytest.importorskip(\"pyspark\")\n # noinspection PyPep8Naming\n import pyspark.sql.functions as F\n from pyspark.sql.types import IntegerType, LongType, Row, StringType\nexcept ImportError:\n pyspark = None\n F = None\n IntegerType = None\n LongType = None\n StringType = None\n Row = None\n\n\[email protected]\ndef test_sparkdf(spark_session):\n def generate_ascending_list_of_datetimes(\n n, start_date=datetime.date(2020, 1, 1), end_date=datetime.date(2020, 12, 31)\n ):\n start_time = datetime.datetime(\n start_date.year, start_date.month, start_date.day\n )\n seconds_between_dates = (end_date - start_date).total_seconds()\n # noinspection PyUnusedLocal\n datetime_list = [\n start_time\n + datetime.timedelta(seconds=random.randrange(int(seconds_between_dates)))\n for i in range(n)\n ]\n datetime_list.sort()\n return datetime_list\n\n k = 120\n random.seed(1)\n timestamp_list = generate_ascending_list_of_datetimes(\n n=k, end_date=datetime.date(2020, 1, 31)\n )\n date_list = [datetime.date(ts.year, ts.month, ts.day) for ts in timestamp_list]\n\n # noinspection PyUnusedLocal\n batch_ids = [random.randint(0, 10) for i in range(k)]\n batch_ids.sort()\n # noinspection PyUnusedLocal\n session_ids = [random.randint(2, 60) for i in range(k)]\n session_ids = [i - random.randint(0, 2) for i in session_ids]\n session_ids.sort()\n\n # noinspection PyUnusedLocal\n spark_df = spark_session.createDataFrame(\n data=pd.DataFrame(\n {\n \"id\": range(k),\n \"batch_id\": batch_ids,\n \"date\": date_list,\n \"y\": [d.year for d in date_list],\n \"m\": [d.month for d in date_list],\n \"d\": [d.day for d in date_list],\n \"timestamp\": timestamp_list,\n \"session_ids\": session_ids,\n \"event_type\": [\n random.choice([\"start\", \"stop\", \"continue\"]) for i in range(k)\n ],\n \"favorite_color\": [\n \"#\"\n + \"\".join(\n [random.choice(list(\"0123456789ABCDEF\")) for j in range(6)]\n )\n for i in range(k)\n ],\n }\n )\n )\n spark_df = spark_df.withColumn(\n \"timestamp\", F.col(\"timestamp\").cast(IntegerType()).cast(StringType())\n )\n return spark_df\n\n\[email protected]\ndef spark_df_from_pandas_df():\n \"\"\"\n Construct a spark dataframe from pandas dataframe.\n Returns:\n Function that can be used in your test e.g.:\n spark_df = spark_df_from_pandas_df(spark_session, pandas_df)\n \"\"\"\n\n def _construct_spark_df_from_pandas(\n spark_session,\n pandas_df,\n ):\n\n spark_df = spark_session.createDataFrame(\n [\n tuple(\n None if isinstance(x, (float, int)) and np.isnan(x) else x\n for x in record.tolist()\n )\n for record in pandas_df.to_records(index=False)\n ],\n pandas_df.columns.tolist(),\n )\n return spark_df\n\n return 
_construct_spark_df_from_pandas\n\n\ndef test_reader_fn(spark_session, basic_spark_df_execution_engine):\n engine = basic_spark_df_execution_engine\n # Testing that can recognize basic csv file\n fn = engine._get_reader_fn(reader=spark_session.read, path=\"myfile.csv\")\n assert \"<bound method DataFrameReader.csv\" in str(fn)\n\n # Ensuring that other way around works as well - reader_method should always override path\n fn_new = engine._get_reader_fn(reader=spark_session.read, reader_method=\"csv\")\n assert \"<bound method DataFrameReader.csv\" in str(fn_new)\n\n\ndef test_reader_fn_parameters(\n spark_session, basic_spark_df_execution_engine, tmp_path_factory\n):\n base_directory = str(tmp_path_factory.mktemp(\"test_csv\"))\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test-A.csv\",\n ],\n )\n test_df_small_csv_path = base_directory + \"/test-A.csv\"\n engine = basic_spark_df_execution_engine\n fn = engine._get_reader_fn(reader=spark_session.read, path=test_df_small_csv_path)\n assert \"<bound method DataFrameReader.csv\" in str(fn)\n\n test_sparkdf_with_header_param = basic_spark_df_execution_engine.get_batch_data(\n PathBatchSpec(\n path=test_df_small_csv_path,\n data_asset_name=\"DATA_ASSET\",\n reader_options={\"header\": True},\n )\n ).dataframe\n assert test_sparkdf_with_header_param.head() == Row(x=\"1\", y=\"2\")\n\n test_sparkdf_with_no_header_param = basic_spark_df_execution_engine.get_batch_data(\n PathBatchSpec(path=test_df_small_csv_path, data_asset_name=\"DATA_ASSET\")\n ).dataframe\n assert test_sparkdf_with_no_header_param.head() == Row(_c0=\"x\", _c1=\"y\")\n\n\ndef test_get_domain_records_with_column_domain(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [2, 3, 4, 5, None], \"c\": [1, 2, 3, 4, None]}\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column\": \"a\",\n \"row_condition\": 'col(\"b\")<5',\n \"condition_parser\": \"great_expectations__experimental__\",\n }\n )\n\n expected_column_pd_df = pd_df.iloc[:3]\n expected_column_df = spark_df_from_pandas_df(spark_session, expected_column_pd_df)\n\n assert dataframes_equal(\n data, expected_column_df\n ), \"Data does not match after getting full access compute domain\"\n\n\ndef test_get_domain_records_with_column_domain_and_filter_conditions(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [2, 3, 4, 5, None], \"c\": [1, 2, 3, 4, None]}\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column\": \"a\",\n \"row_condition\": 'col(\"b\")<5',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"filter_conditions\": [\n RowCondition(\n condition=\"b IS NOT NULL\",\n condition_type=RowConditionParserType.SPARK_SQL,\n )\n ],\n }\n )\n\n expected_column_pd_df = pd_df.iloc[:3]\n expected_column_df = spark_df_from_pandas_df(spark_session, expected_column_pd_df)\n\n assert dataframes_equal(\n data, expected_column_df\n ), \"Data does not match after getting full access compute domain\"\n\n\ndef test_get_domain_records_with_different_column_domain_and_filter_conditions(\n 
spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [2, 3, 4, 5, None], \"c\": [1, 2, 3, 4, None]}\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column\": \"a\",\n \"row_condition\": 'col(\"a\")<2',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"filter_conditions\": [\n RowCondition(\n condition=\"b IS NOT NULL\",\n condition_type=RowConditionParserType.SPARK_SQL,\n )\n ],\n }\n )\n\n expected_column_pd_df = pd_df.iloc[:1]\n expected_column_df = spark_df_from_pandas_df(spark_session, expected_column_pd_df)\n\n assert dataframes_equal(\n data, expected_column_df\n ), \"Data does not match after getting full access compute domain\"\n\n\ndef test_get_domain_records_with_different_column_domain_and_multiple_filter_conditions(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [2, 3, 4, 5, None], \"c\": [1, 2, 3, 4, None]}\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column\": \"a\",\n \"row_condition\": 'col(\"a\")<10',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"filter_conditions\": [\n RowCondition(\n condition=\"b IS NOT NULL\",\n condition_type=RowConditionParserType.SPARK_SQL,\n ),\n RowCondition(\n condition=\"NOT isnan(b)\",\n condition_type=RowConditionParserType.SPARK_SQL,\n ),\n ],\n }\n )\n\n expected_column_pd_df = pd_df.iloc[:4]\n expected_column_df = spark_df_from_pandas_df(spark_session, expected_column_pd_df)\n\n assert dataframes_equal(\n data, expected_column_df\n ), \"Data does not match after getting full access compute domain\"\n\n\ndef test_get_domain_records_with_column_pair_domain(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6],\n \"b\": [2, 3, 4, 5, None, 6],\n \"c\": [1, 2, 3, 4, 5, None],\n }\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"b\",\n \"row_condition\": 'col(\"b\")>2',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"ignore_row_if\": \"both_values_are_missing\",\n }\n )\n\n expected_column_pair_pd_df = pd.DataFrame(\n {\"a\": [2, 3, 4, 6], \"b\": [3.0, 4.0, 5.0, 6.0], \"c\": [2.0, 3.0, 4.0, None]}\n )\n expected_column_pair_df = spark_df_from_pandas_df(\n spark_session, expected_column_pair_pd_df\n )\n\n assert dataframes_equal(\n data, expected_column_pair_df\n ), \"Data does not match after getting full access compute domain\"\n\n pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6],\n \"b\": [2, 3, 4, 5, None, 6],\n \"c\": [1, 2, 3, 4, 5, None],\n }\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column_A\": \"b\",\n \"column_B\": \"c\",\n \"row_condition\": 'col(\"b\")>2',\n \"condition_parser\": 
\"great_expectations__experimental__\",\n \"ignore_row_if\": \"either_value_is_missing\",\n }\n )\n for column_name in data.columns:\n data = data.withColumn(column_name, data[column_name].cast(LongType()))\n\n expected_column_pair_pd_df = pd.DataFrame(\n {\"a\": [2, 3, 4], \"b\": [3, 4, 5], \"c\": [2, 3, 4]}\n )\n expected_column_pair_df = spark_df_from_pandas_df(\n spark_session, expected_column_pair_pd_df\n )\n\n assert dataframes_equal(\n data, expected_column_pair_df\n ), \"Data does not match after getting full access compute domain\"\n\n pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6],\n \"b\": [2, 3, 4, 5, None, 6],\n \"c\": [1, 2, 3, 4, 5, None],\n }\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column_A\": \"b\",\n \"column_B\": \"c\",\n \"row_condition\": 'col(\"a\")<6',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"ignore_row_if\": \"neither\",\n }\n )\n\n expected_column_pair_pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5],\n \"b\": [2.0, 3.0, 4.0, 5.0, None],\n \"c\": [1.0, 2.0, 3.0, 4.0, 5.0],\n }\n )\n expected_column_pair_df = spark_df_from_pandas_df(\n spark_session, expected_column_pair_pd_df\n )\n\n assert dataframes_equal(\n data, expected_column_pair_df\n ), \"Data does not match after getting full access compute domain\"\n\n\ndef test_get_domain_records_with_multicolumn_domain(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, None, 5],\n \"b\": [2, 3, 4, 5, 6, 7],\n \"c\": [1, 2, 3, 4, None, 6],\n }\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column_list\": [\"a\", \"c\"],\n \"row_condition\": 'col(\"b\")>2',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"ignore_row_if\": \"all_values_are_missing\",\n }\n )\n for column_name in data.columns:\n data = data.withColumn(column_name, data[column_name].cast(LongType()))\n\n expected_multicolumn_pd_df = pd.DataFrame(\n {\"a\": [2, 3, 4, 5], \"b\": [3, 4, 5, 7], \"c\": [2, 3, 4, 6]}, index=[0, 1, 2, 4]\n )\n expected_multicolumn_df = spark_df_from_pandas_df(\n spark_session, expected_multicolumn_pd_df\n )\n\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=expected_multicolumn_df)\n\n assert dataframes_equal(\n data, expected_multicolumn_df\n ), \"Data does not match after getting full access compute domain\"\n\n pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6],\n \"b\": [2, 3, 4, 5, None, 6],\n \"c\": [1, 2, 3, 4, 5, None],\n }\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column_list\": [\"b\", \"c\"],\n \"row_condition\": 'col(\"a\")<5',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"ignore_row_if\": \"any_value_is_missing\",\n }\n )\n for column_name in data.columns:\n data = data.withColumn(column_name, data[column_name].cast(LongType()))\n\n expected_multicolumn_pd_df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, 5], \"c\": [1, 2, 3, 4]}, index=[0, 1, 2, 3]\n )\n\n expected_multicolumn_df = 
spark_df_from_pandas_df(\n spark_session, expected_multicolumn_pd_df\n )\n\n assert dataframes_equal(\n data, expected_multicolumn_df\n ), \"Data does not match after getting full access compute domain\"\n\n pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, None, 5],\n \"b\": [2, 3, 4, 5, 6, 7],\n \"c\": [1, 2, 3, 4, None, 6],\n }\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column_list\": [\"b\", \"c\"],\n \"ignore_row_if\": \"never\",\n }\n )\n\n expected_multicolumn_pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, None, 5],\n \"b\": [2, 3, 4, 5, 6, 7],\n \"c\": [1, 2, 3, 4, None, 6],\n },\n index=[0, 1, 2, 3, 4, 5],\n )\n\n expected_multicolumn_df = spark_df_from_pandas_df(\n spark_session, expected_multicolumn_pd_df\n )\n\n assert dataframes_equal(\n data, expected_multicolumn_df\n ), \"Data does not match after getting full access compute domain\"\n\n\ndef test_get_compute_domain_with_no_domain_kwargs(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={}, domain_type=MetricDomainTypes.TABLE\n )\n assert compute_kwargs is not None, \"Compute domain kwargs should be existent\"\n assert accessor_kwargs == {}\n assert data.schema == df.schema\n assert data.collect() == df.collect()\n\n\ndef test_get_compute_domain_with_column_domain(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\"column\": \"a\"}, domain_type=MetricDomainTypes.COLUMN\n )\n assert compute_kwargs is not None, \"Compute domain kwargs should be existent\"\n assert accessor_kwargs == {\"column\": \"a\"}\n assert data.schema == df.schema\n assert data.collect() == df.collect()\n\n\ndef test_get_compute_domain_with_row_condition(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_df_from_pandas_df(spark_session, pd_df)\n expected_df = df.filter(F.col(\"b\") > 2)\n\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\"row_condition\": \"b > 2\", \"condition_parser\": \"spark\"},\n domain_type=MetricDomainTypes.TABLE,\n )\n # Ensuring data has been properly queried\n assert data.schema == expected_df.schema\n assert data.collect() == expected_df.collect()\n\n # Ensuring compute kwargs have not been modified\n assert (\n \"row_condition\" in compute_kwargs.keys()\n ), \"Row condition should be located within compute kwargs\"\n assert accessor_kwargs == {}\n\n\n# What happens when we filter such that no value meets the condition?\ndef test_get_compute_domain_with_unmeetable_row_condition(\n spark_session, basic_spark_df_execution_engine, 
spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_df_from_pandas_df(spark_session, pd_df)\n expected_df = df.filter(F.col(\"b\") > 24)\n\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\"row_condition\": \"b > 24\", \"condition_parser\": \"spark\"},\n domain_type=MetricDomainTypes.TABLE,\n )\n # Ensuring data has been properly queried\n assert data.schema == expected_df.schema\n assert data.collect() == expected_df.collect()\n\n # Ensuring compute kwargs have not been modified\n assert \"row_condition\" in compute_kwargs.keys()\n assert accessor_kwargs == {}\n\n\ndef test_basic_setup(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame({\"x\": range(10)})\n df = spark_df_from_pandas_df(spark_session, pd_df)\n batch_data = basic_spark_df_execution_engine.get_batch_data(\n batch_spec=RuntimeDataBatchSpec(\n batch_data=df,\n data_asset_name=\"DATA_ASSET\",\n )\n ).dataframe\n assert batch_data is not None\n\n\ndef test_get_batch_data(test_sparkdf, basic_spark_df_execution_engine):\n test_sparkdf = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(batch_data=test_sparkdf, data_asset_name=\"DATA_ASSET\")\n ).dataframe\n assert test_sparkdf.count() == 120\n assert len(test_sparkdf.columns) == 10\n\n\ndef test_get_batch_empty_splitter(\n test_folder_connection_path_csv, basic_spark_df_execution_engine\n):\n # reader_method not configured because spark will configure own reader by default\n # reader_options are needed to specify the fact that the first line of test file is the header\n test_sparkdf = basic_spark_df_execution_engine.get_batch_data(\n PathBatchSpec(\n path=os.path.join(test_folder_connection_path_csv, \"test.csv\"),\n reader_options={\"header\": True},\n splitter_method=None,\n )\n ).dataframe\n assert test_sparkdf.count() == 5\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_empty_splitter_tsv(\n test_folder_connection_path_tsv, basic_spark_df_execution_engine\n):\n # reader_method not configured because spark will configure own reader by default\n # reader_options are needed to specify the fact that the first line of test file is the header\n # reader_options are also needed to specify the separator (otherwise, comma will be used as the default separator)\n test_sparkdf = basic_spark_df_execution_engine.get_batch_data(\n PathBatchSpec(\n path=os.path.join(test_folder_connection_path_tsv, \"test.tsv\"),\n reader_options={\"header\": True, \"sep\": \"\\t\"},\n splitter_method=None,\n )\n ).dataframe\n assert test_sparkdf.count() == 5\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_empty_splitter_parquet(\n test_folder_connection_path_parquet, basic_spark_df_execution_engine\n):\n # Note: reader method and reader_options are not needed, because\n # SparkDFExecutionEngine automatically determines the file type as well as the schema of the Parquet file.\n test_sparkdf = basic_spark_df_execution_engine.get_batch_data(\n PathBatchSpec(\n path=os.path.join(test_folder_connection_path_parquet, \"test.parquet\"),\n splitter_method=None,\n )\n ).dataframe\n assert test_sparkdf.count() == 5\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_with_split_on_whole_table_runtime(\n test_sparkdf, basic_spark_df_execution_engine\n):\n test_sparkdf = 
basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf, splitter_method=\"_split_on_whole_table\"\n )\n ).dataframe\n assert test_sparkdf.count() == 120\n assert len(test_sparkdf.columns) == 10\n\n\ndef test_get_batch_with_split_on_whole_table_filesystem(\n test_folder_connection_path_csv, basic_spark_df_execution_engine\n):\n # reader_method not configured because spark will configure own reader by default\n test_sparkdf = basic_spark_df_execution_engine.get_batch_data(\n PathBatchSpec(\n path=os.path.join(test_folder_connection_path_csv, \"test.csv\"),\n splitter_method=\"_split_on_whole_table\",\n )\n ).dataframe\n assert test_sparkdf.count() == 6\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_with_split_on_whole_table_s3(\n spark_session, basic_spark_df_execution_engine\n):\n # noinspection PyUnusedLocal\n def mocked_get_reader_function(*args, **kwargs):\n # noinspection PyUnusedLocal,PyShadowingNames\n def mocked_reader_function(*args, **kwargs):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_session.createDataFrame(\n [\n tuple(\n None if isinstance(x, (float, int)) and np.isnan(x) else x\n for x in record.tolist()\n )\n for record in pd_df.to_records(index=False)\n ],\n pd_df.columns.tolist(),\n )\n return df\n\n return mocked_reader_function\n\n spark_engine = basic_spark_df_execution_engine\n spark_engine._get_reader_fn = mocked_get_reader_function\n\n test_sparkdf = spark_engine.get_batch_data(\n S3BatchSpec(\n path=\"s3://bucket/test/test.csv\",\n reader_method=\"csv\",\n reader_options={\"header\": True},\n splitter_method=\"_split_on_whole_table\",\n )\n ).dataframe\n assert test_sparkdf.count() == 4\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_with_split_on_whole_table_azure(\n spark_session, basic_spark_df_execution_engine\n):\n # noinspection PyUnusedLocal\n def mocked_get_reader_function(*args, **kwargs):\n # noinspection PyUnusedLocal,PyShadowingNames\n def mocked_reader_function(*args, **kwargs):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_session.createDataFrame(\n [\n tuple(\n None if isinstance(x, (float, int)) and np.isnan(x) else x\n for x in record.tolist()\n )\n for record in pd_df.to_records(index=False)\n ],\n pd_df.columns.tolist(),\n )\n return df\n\n return mocked_reader_function\n\n spark_engine = basic_spark_df_execution_engine\n spark_engine._get_reader_fn = mocked_get_reader_function\n\n test_sparkdf = spark_engine.get_batch_data(\n AzureBatchSpec(\n path=\"wasbs://test_container@test_account.blob.core.windows.net/test_dir/test_file.csv\",\n reader_method=\"csv\",\n reader_options={\"header\": True},\n splitter_method=\"_split_on_whole_table\",\n )\n ).dataframe\n assert test_sparkdf.count() == 4\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_with_split_on_whole_table_gcs(\n spark_session, basic_spark_df_execution_engine\n):\n # noinspection PyUnusedLocal\n def mocked_get_reader_function(*args, **kwargs):\n # noinspection PyUnusedLocal,PyShadowingNames\n def mocked_reader_function(*args, **kwargs):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_session.createDataFrame(\n [\n tuple(\n None if isinstance(x, (float, int)) and np.isnan(x) else x\n for x in record.tolist()\n )\n for record in pd_df.to_records(index=False)\n ],\n pd_df.columns.tolist(),\n )\n return df\n\n return mocked_reader_function\n\n spark_engine = basic_spark_df_execution_engine\n 
spark_engine._get_reader_fn = mocked_get_reader_function\n\n test_sparkdf = spark_engine.get_batch_data(\n GCSBatchSpec(\n path=\"gcs://bucket/test/test.csv\",\n reader_method=\"csv\",\n reader_options={\"header\": True},\n splitter_method=\"_split_on_whole_table\",\n )\n ).dataframe\n assert test_sparkdf.count() == 4\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_with_split_on_column_value(\n test_sparkdf, basic_spark_df_execution_engine\n):\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_column_value\",\n splitter_kwargs={\n \"column_name\": \"batch_id\",\n \"batch_identifiers\": {\"batch_id\": 2},\n },\n )\n ).dataframe\n assert test_sparkdf.count() == 120\n assert len(test_sparkdf.columns) == 10\n collected = split_df.collect()\n for val in collected:\n assert val.batch_id == 2\n\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_column_value\",\n splitter_kwargs={\n \"column_name\": \"date\",\n \"batch_identifiers\": {\"date\": datetime.date(2020, 1, 30)},\n },\n )\n ).dataframe\n assert split_df.count() == 3\n assert len(split_df.columns) == 10\n\n\ndef test_get_batch_with_split_on_converted_datetime(\n test_sparkdf, basic_spark_df_execution_engine\n):\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_converted_datetime\",\n splitter_kwargs={\n \"column_name\": \"timestamp\",\n \"batch_identifiers\": {\"timestamp\": \"2020-01-03\"},\n },\n )\n ).dataframe\n assert split_df.count() == 2\n assert len(split_df.columns) == 10\n\n\ndef test_get_batch_with_split_on_divided_integer(\n test_sparkdf, basic_spark_df_execution_engine\n):\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_divided_integer\",\n splitter_kwargs={\n \"column_name\": \"id\",\n \"divisor\": 10,\n \"batch_identifiers\": {\"id\": 5},\n },\n )\n ).dataframe\n assert split_df.count() == 10\n assert len(split_df.columns) == 10\n max_result = split_df.select([F.max(\"id\")])\n assert max_result.collect()[0][\"max(id)\"] == 59\n min_result = split_df.select([F.min(\"id\")])\n assert min_result.collect()[0][\"min(id)\"] == 50\n\n\ndef test_get_batch_with_split_on_mod_integer(\n test_sparkdf, basic_spark_df_execution_engine\n):\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_mod_integer\",\n splitter_kwargs={\n \"column_name\": \"id\",\n \"mod\": 10,\n \"batch_identifiers\": {\"id\": 5},\n },\n )\n ).dataframe\n\n assert split_df.count() == 12\n assert len(split_df.columns) == 10\n max_result = split_df.select([F.max(\"id\")])\n assert max_result.collect()[0][\"max(id)\"] == 115\n min_result = split_df.select([F.min(\"id\")])\n assert min_result.collect()[0][\"min(id)\"] == 5\n\n\ndef test_get_batch_with_split_on_multi_column_values(\n test_sparkdf, basic_spark_df_execution_engine\n):\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_multi_column_values\",\n splitter_kwargs={\n \"column_names\": [\"y\", \"m\", \"d\"],\n \"batch_identifiers\": {\n \"y\": 2020,\n \"m\": 1,\n \"d\": 5,\n },\n },\n )\n ).dataframe\n assert split_df.count() == 4\n assert 
len(split_df.columns) == 10\n collected = split_df.collect()\n for val in collected:\n assert val.date == datetime.date(2020, 1, 5)\n\n with pytest.raises(ValueError):\n # noinspection PyUnusedLocal\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_multi_column_values\",\n splitter_kwargs={\n \"column_names\": [\"I\", \"dont\", \"exist\"],\n \"batch_identifiers\": {\n \"y\": 2020,\n \"m\": 1,\n \"d\": 5,\n },\n },\n )\n ).dataframe\n\n\ndef test_get_batch_with_split_on_hashed_column_incorrect_hash_function_name(\n test_sparkdf,\n basic_spark_df_execution_engine,\n):\n with pytest.raises(ge_exceptions.ExecutionEngineError):\n # noinspection PyUnusedLocal\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_hashed_column\",\n splitter_kwargs={\n \"column_name\": \"favorite_color\",\n \"hash_digits\": 1,\n \"hash_function_name\": \"I_wont_work\",\n \"batch_identifiers\": {\n \"hash_value\": \"a\",\n },\n },\n )\n ).dataframe\n\n\ndef test_get_batch_with_split_on_hashed_column(\n test_sparkdf, basic_spark_df_execution_engine\n):\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_hashed_column\",\n splitter_kwargs={\n \"column_name\": \"favorite_color\",\n \"hash_digits\": 1,\n \"hash_function_name\": \"sha256\",\n \"batch_identifiers\": {\n \"hash_value\": \"a\",\n },\n },\n )\n ).dataframe\n assert split_df.count() == 8\n assert len(split_df.columns) == 10\n\n\n# ### Sampling methods ###\ndef test_get_batch_empty_sampler(test_sparkdf, basic_spark_df_execution_engine):\n sampled_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(batch_data=test_sparkdf, sampling_method=None)\n ).dataframe\n assert sampled_df.count() == 120\n assert len(sampled_df.columns) == 10\n\n\ndef test_sample_using_random(test_sparkdf, basic_spark_df_execution_engine):\n sampled_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf, sampling_method=\"_sample_using_random\"\n )\n ).dataframe\n # The test dataframe contains 10 columns and 120 rows.\n assert len(sampled_df.columns) == 10\n assert 0 <= sampled_df.count() <= 120\n # The sampling probability \"p\" used in \"SparkDFExecutionEngine._sample_using_random()\" is 0.1 (the equivalent of an\n # unfair coin with the 10% chance of coming up as \"heads\"). 
Hence, we should never get as much as 20% of the rows.\n assert sampled_df.count() < 25\n\n\ndef test_sample_using_mod(test_sparkdf, basic_spark_df_execution_engine):\n sampled_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n sampling_method=\"_sample_using_mod\",\n sampling_kwargs={\n \"column_name\": \"id\",\n \"mod\": 5,\n \"value\": 4,\n },\n )\n ).dataframe\n assert sampled_df.count() == 24\n assert len(sampled_df.columns) == 10\n\n\ndef test_sample_using_a_list(test_sparkdf, basic_spark_df_execution_engine):\n sampled_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n sampling_method=\"_sample_using_a_list\",\n sampling_kwargs={\n \"column_name\": \"id\",\n \"value_list\": [3, 5, 7, 11],\n },\n )\n ).dataframe\n assert sampled_df.count() == 4\n assert len(sampled_df.columns) == 10\n\n\ndef test_sample_using_md5_wrong_hash_function_name(\n test_sparkdf, basic_spark_df_execution_engine\n):\n with pytest.raises(ge_exceptions.ExecutionEngineError):\n # noinspection PyUnusedLocal\n sampled_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n sampling_method=\"_sample_using_hash\",\n sampling_kwargs={\n \"column_name\": \"date\",\n \"hash_function_name\": \"I_wont_work\",\n },\n )\n ).dataframe\n\n\ndef test_sample_using_md5(test_sparkdf, basic_spark_df_execution_engine):\n sampled_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n sampling_method=\"_sample_using_hash\",\n sampling_kwargs={\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n },\n )\n ).dataframe\n assert sampled_df.count() == 10\n assert len(sampled_df.columns) == 10\n\n collected = sampled_df.collect()\n for val in collected:\n assert val.date in [datetime.date(2020, 1, 15), datetime.date(2020, 1, 29)]\n\n\ndef test_split_on_multi_column_values_and_sample_using_random(\n test_sparkdf, basic_spark_df_execution_engine\n):\n returned_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_multi_column_values\",\n splitter_kwargs={\n \"column_names\": [\"y\", \"m\", \"d\"],\n \"batch_identifiers\": {\n \"y\": 2020,\n \"m\": 1,\n \"d\": 5,\n },\n },\n sampling_method=\"_sample_using_random\",\n sampling_kwargs={\n \"p\": 0.5,\n },\n )\n ).dataframe\n\n # The test dataframe contains 10 columns and 120 rows.\n assert len(returned_df.columns) == 10\n # The number of returned rows corresponding to the value of \"batch_identifiers\" above is 4.\n assert 0 <= returned_df.count() <= 4\n # The sampling probability \"p\" used in \"SparkDFExecutionEngine._sample_using_random()\" is 0.5 (the equivalent of a\n # fair coin with the 50% chance of coming up as \"heads\"). Hence, on average we should get 50% of the rows, which is\n # 2; however, for such a small sample (of 4 rows), the number of rows returned by an individual run can deviate from\n # this average. Still, in the majority of trials, the number of rows should not be fewer than 2 or greater than 3.\n # The assertion in the next line, supporting this reasoning, is commented out to insure zero failures. 
Developers\n # are encouraged to uncomment it, whenever the \"_sample_using_random\" feature is the main focus of a given effort.\n # assert 2 <= returned_df.count() <= 3\n\n for val in returned_df.collect():\n assert val.date == datetime.date(2020, 1, 5)\n\n\ndef test_add_column_row_condition(spark_session, basic_spark_df_execution_engine):\n df = pd.DataFrame({\"foo\": [1, 2, 3, 3, None, 2, 3, 4, 5, 6]})\n df = spark_session.createDataFrame(\n [\n tuple(\n None if isinstance(x, (float, int)) and np.isnan(x) else x\n for x in record.tolist()\n )\n for record in df.to_records(index=False)\n ],\n df.columns.tolist(),\n )\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n domain_kwargs = {\"column\": \"foo\"}\n\n new_domain_kwargs = engine.add_column_row_condition(\n domain_kwargs, filter_null=True, filter_nan=False\n )\n assert new_domain_kwargs[\"filter_conditions\"] == [\n RowCondition(\n condition=\"foo IS NOT NULL\", condition_type=RowConditionParserType.SPARK_SQL\n )\n ]\n df, cd, ad = engine.get_compute_domain(new_domain_kwargs, domain_type=\"table\")\n res = df.collect()\n assert res == [(1,), (2,), (3,), (3,), (2,), (3,), (4,), (5,), (6,)]\n\n new_domain_kwargs = engine.add_column_row_condition(\n domain_kwargs, filter_null=True, filter_nan=True\n )\n assert new_domain_kwargs[\"filter_conditions\"] == [\n RowCondition(\n condition=\"foo IS NOT NULL\", condition_type=RowConditionParserType.SPARK_SQL\n ),\n RowCondition(\n condition=\"NOT isnan(foo)\", condition_type=RowConditionParserType.SPARK_SQL\n ),\n ]\n df, cd, ad = engine.get_compute_domain(new_domain_kwargs, domain_type=\"table\")\n res = df.collect()\n assert res == [(1,), (2,), (3,), (3,), (2,), (3,), (4,), (5,), (6,)]\n\n new_domain_kwargs = engine.add_column_row_condition(\n domain_kwargs, filter_null=False, filter_nan=True\n )\n assert new_domain_kwargs[\"filter_conditions\"] == [\n RowCondition(\n condition=\"NOT isnan(foo)\", condition_type=RowConditionParserType.SPARK_SQL\n )\n ]\n df, cd, ad = engine.get_compute_domain(new_domain_kwargs, domain_type=\"table\")\n res = df.collect()\n assert res == [(1,), (2,), (3,), (3,), (None,), (2,), (3,), (4,), (5,), (6,)]\n\n # This time, our skip value *will* be nan\n df = pd.DataFrame({\"foo\": [1, 2, 3, 3, None, 2, 3, 4, 5, 6]})\n df = spark_session.createDataFrame(df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n\n new_domain_kwargs = engine.add_column_row_condition(\n domain_kwargs, filter_null=False, filter_nan=True\n )\n assert new_domain_kwargs[\"filter_conditions\"] == [\n RowCondition(\n condition=\"NOT isnan(foo)\", condition_type=RowConditionParserType.SPARK_SQL\n )\n ]\n df, cd, ad = engine.get_compute_domain(new_domain_kwargs, domain_type=\"table\")\n res = df.collect()\n assert res == [(1,), (2,), (3,), (3,), (2,), (3,), (4,), (5,), (6,)]\n\n new_domain_kwargs = engine.add_column_row_condition(\n domain_kwargs, filter_null=True, filter_nan=False\n )\n assert new_domain_kwargs[\"filter_conditions\"] == [\n RowCondition(\n condition=\"foo IS NOT NULL\", condition_type=RowConditionParserType.SPARK_SQL\n ),\n ]\n df, cd, ad = engine.get_compute_domain(new_domain_kwargs, domain_type=\"table\")\n res = df.collect()\n expected = [(1,), (2,), (3,), (3,), (np.nan,), (2,), (3,), (4,), (5,), (6,)]\n # since nan != nan by default\n assert np.allclose(res, expected, rtol=0, atol=0, equal_nan=True)\n\n\n# Function to test for spark dataframe equality\ndef 
dataframes_equal(first_table, second_table):\n if first_table.schema != second_table.schema:\n return False\n if first_table.collect() != second_table.collect():\n return False\n return True\n\n\n# Ensuring that, given aggregate metrics, they can be properly bundled together\ndef test_sparkdf_batch_aggregate_metrics(caplog, spark_session):\n import datetime\n\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 1, 2, 3, 3], \"b\": [4, 4, 4, 4, 4, 4]},\n ),\n batch_id=\"1234\",\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n\n metrics.update(results)\n\n desired_metric_1 = MetricConfiguration(\n metric_name=\"column.max.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_2 = MetricConfiguration(\n metric_name=\"column.min.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_3 = MetricConfiguration(\n metric_name=\"column.max.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_4 = MetricConfiguration(\n metric_name=\"column.min.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(\n desired_metric_1,\n desired_metric_2,\n desired_metric_3,\n desired_metric_4,\n ),\n metrics=metrics,\n )\n metrics.update(results)\n\n desired_metric_1 = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": desired_metric_1,\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_2 = MetricConfiguration(\n metric_name=\"column.min\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": desired_metric_2,\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_3 = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": desired_metric_3,\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_4 = MetricConfiguration(\n metric_name=\"column.min\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": desired_metric_4,\n \"table.columns\": table_columns_metric,\n },\n )\n start = datetime.datetime.now()\n caplog.clear()\n caplog.set_level(logging.DEBUG, logger=\"great_expectations\")\n results = engine.resolve_metrics(\n metrics_to_resolve=(\n desired_metric_1,\n desired_metric_2,\n desired_metric_3,\n desired_metric_4,\n ),\n metrics=metrics,\n )\n metrics.update(results)\n end = datetime.datetime.now()\n print(end - start)\n assert metrics[desired_metric_1.id] == 3\n assert metrics[desired_metric_2.id] == 1\n assert metrics[desired_metric_3.id] == 4\n assert metrics[desired_metric_4.id] == 4\n\n # Check that all four of these metrics were computed on a single domain\n found_message = False\n 
for record in caplog.records:\n if (\n record.message\n == \"SparkDFExecutionEngine computed 4 metrics on domain_id ()\"\n ):\n found_message = True\n assert found_message\n\n\n# Ensuring functionality of compute_domain when no domain kwargs are given\ndef test_get_compute_domain_with_no_domain_kwargs_alt(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={}, domain_type=\"table\"\n )\n\n # Ensuring that with no domain nothing happens to the data itself\n assert dataframes_equal(\n data, df\n ), \"Data does not match after getting compute domain\"\n assert compute_kwargs == {}, \"Compute domain kwargs should be existent\"\n assert accessor_kwargs == {}, \"Accessor kwargs have been modified\"\n\n\n# Testing for only untested use case - multicolumn\ndef test_get_compute_domain_with_column_pair(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\"column_A\": \"a\", \"column_B\": \"b\"}, domain_type=\"column_pair\"\n )\n\n # Ensuring that with no domain nothing happens to the data itself\n assert dataframes_equal(\n data, df\n ), \"Data does not match after getting compute domain\"\n assert compute_kwargs == {}, \"Compute domain kwargs should be existent\"\n assert accessor_kwargs == {\n \"column_A\": \"a\",\n \"column_B\": \"b\",\n }, \"Accessor kwargs have been modified\"\n\n\n# Testing for only untested use case - multicolumn\ndef test_get_compute_domain_with_multicolumn(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None], \"c\": [1, 2, 3, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\"column_list\": [\"a\", \"b\", \"c\"]}, domain_type=\"multicolumn\"\n )\n\n # Ensuring that with no domain nothing happens to the data itself\n assert dataframes_equal(\n data, df\n ), \"Data does not match after getting compute domain\"\n assert compute_kwargs == {}, \"Compute domain kwargs should be empty\"\n assert accessor_kwargs == {\n \"column_list\": [\"a\", \"b\", \"c\"]\n }, \"Accessor kwargs have been modified\"\n\n\n# Testing whether compute domain is properly calculated, but this time obtaining a column\ndef test_get_compute_domain_with_column_domain_alt(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\"column\": \"a\"}, domain_type=\"column\"\n )\n\n # Ensuring that column domain is now an accessor kwarg, and data remains unmodified\n assert dataframes_equal(\n data, df\n ), \"Data does not match after getting compute domain\"\n assert compute_kwargs == {}, \"Compute domain kwargs should be empty\"\n assert accessor_kwargs == {\"column\": \"a\"}, \"Accessor kwargs have been modified\"\n\n\n# Using an unmeetable row condition to see if empty 
dataset will result in errors\ndef test_get_domain_records_with_row_condition_alt(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n expected_df = df.where(\"b > 2\")\n\n # Loading batch data\n engine.load_batch_data(batch_data=df, batch_id=\"1234\")\n\n data = engine.get_domain_records(\n domain_kwargs={\n \"row_condition\": \"b > 2\",\n \"condition_parser\": \"spark\",\n }\n )\n\n # Ensuring data has been properly queried\n assert dataframes_equal(\n data, expected_df\n ), \"Data does not match after getting compute domain\"\n\n\n# What happens when we filter such that no value meets the condition?\ndef test_get_domain_records_with_unmeetable_row_condition_alt(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n expected_df = df.where(\"b > 24\")\n\n # Loading batch data\n engine.load_batch_data(batch_data=df, batch_id=\"1234\")\n\n data = engine.get_domain_records(\n domain_kwargs={\n \"row_condition\": \"b > 24\",\n \"condition_parser\": \"spark\",\n }\n )\n # Ensuring data has been properly queried\n assert dataframes_equal(\n data, expected_df\n ), \"Data does not match after getting compute domain\"\n\n # Ensuring errors for column and column_ pair domains are caught\n with pytest.raises(ge_exceptions.GreatExpectationsError):\n # noinspection PyUnusedLocal\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\n \"row_condition\": \"b > 24\",\n \"condition_parser\": \"spark\",\n },\n domain_type=\"column\",\n )\n with pytest.raises(ge_exceptions.GreatExpectationsError) as g:\n # noinspection PyUnusedLocal\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\n \"row_condition\": \"b > 24\",\n \"condition_parser\": \"spark\",\n },\n domain_type=\"column_pair\",\n )\n\n\n# Testing to ensure that great expectation experimental parser also works in terms of defining a compute domain\ndef test_get_compute_domain_with_ge_experimental_condition_parser(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n # Filtering expected data based on row condition\n expected_df = df.where(\"b == 2\")\n\n # Loading batch data\n engine.load_batch_data(batch_data=df, batch_id=\"1234\")\n\n # Obtaining data from computation\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\n \"column\": \"b\",\n \"row_condition\": 'col(\"b\") == 2',\n \"condition_parser\": \"great_expectations__experimental__\",\n },\n domain_type=\"column\",\n )\n # Ensuring data has been properly queried\n assert dataframes_equal(\n data, expected_df\n ), \"Data does not match after getting compute domain\"\n\n # Ensuring compute kwargs have not been modified\n assert (\n \"row_condition\" in compute_kwargs.keys()\n ), \"Row condition should be located within compute kwargs\"\n assert accessor_kwargs == {\"column\": \"b\"}, \"Accessor kwargs have been modified\"\n\n # Should react same for get_domain_records()\n data = engine.get_domain_records(\n domain_kwargs={\n \"column\": \"b\",\n \"row_condition\": 'col(\"b\") == 2',\n \"condition_parser\": 
\"great_expectations__experimental__\",\n }\n )\n # Ensuring data has been properly queried\n assert dataframes_equal(\n data, expected_df\n ), \"Data does not match after getting compute domain\"\n\n\ndef test_get_compute_domain_with_nonexistent_condition_parser(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n # Loading batch data\n engine.load_batch_data(batch_data=df, batch_id=\"1234\")\n\n # Expect GreatExpectationsError because parser doesn't exist\n with pytest.raises(ge_exceptions.GreatExpectationsError):\n # noinspection PyUnusedLocal\n data = engine.get_domain_records(\n domain_kwargs={\n \"row_condition\": \"b > 24\",\n \"condition_parser\": \"nonexistent\",\n },\n )\n\n\n# Ensuring that we can properly inform user when metric doesn't exist - should get a metric provider error\ndef test_resolve_metric_bundle_with_nonexistent_metric(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 1, 2, 3, 3], \"b\": [4, 4, 4, 4, 4, 4]},\n ),\n batch_id=\"1234\",\n )\n\n desired_metric_1 = MetricConfiguration(\n metric_name=\"column_values.unique\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n )\n desired_metric_2 = MetricConfiguration(\n metric_name=\"column.min\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n )\n desired_metric_3 = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n )\n desired_metric_4 = MetricConfiguration(\n metric_name=\"column.does_not_exist\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n )\n\n # Ensuring a metric provider error is raised if metric does not exist\n with pytest.raises(ge_exceptions.MetricProviderError) as e:\n # noinspection PyUnusedLocal\n res = engine.resolve_metrics(\n metrics_to_resolve=(\n desired_metric_1,\n desired_metric_2,\n desired_metric_3,\n desired_metric_4,\n )\n )\n print(e)\n\n\n# Making sure dataframe property is functional\ndef test_dataframe_property_given_loaded_batch(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 5, 22, 3, 5, 10]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n # Ensuring Data not distorted\n assert engine.dataframe == df\n"
] | [
[
"numpy.allclose",
"pandas.DataFrame",
"numpy.isnan"
]
] |
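The test module above repeatedly converts pandas frames to Spark frames while mapping NaN cells to None, so missing values arrive in Spark as SQL NULLs rather than floating-point NaNs. A minimal standalone sketch of that conversion pattern, assuming a local pyspark installation (the frame contents are made up):

```python
import numpy as np
import pandas as pd
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").getOrCreate()

pd_df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]})
spark_df = spark.createDataFrame(
    [
        # Replace NaN with None so Spark stores a NULL instead of NaN.
        tuple(
            None if isinstance(x, (float, int)) and np.isnan(x) else x
            for x in record.tolist()
        )
        for record in pd_df.to_records(index=False)
    ],
    pd_df.columns.tolist(),
)
spark_df.show()  # the missing cell in "b" prints as null, not NaN
```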
jnefoussi/pytechfin | [
"4d5bc44410b7161ab3acd65b2474468a84e576af"
] | [
"pytechfin/carol_techfin.py"
] | [
"from collections import defaultdict\nimport pandas as pd\n\n# TODO: Add custom pipeline function from \n# https://github.com/rafarui/techfin-reprocess/blob/master/functions/custom_pipeline.py\n\n# TODO: Add track_tasks function from\n# https://github.com/rafarui/techfin-reprocess/blob/master/functions/carol_task.py\n\nclass CarolTechfin:\n \"\"\" Module to handle Carol's data.\n Needed add in Carol Module\n \"\"\"\n\n def __init__(self, carol):\n self.carol = carol\n\n\n def get_staging_data(self, staging_name, \n connector_name='protheus_carol', merge_records=True, columns=None, callback=None, max_workers=30):\n \"\"\" Get records from a staging table.\n\n Args:\n staging_name: `str`,\n Staging name to fetch parquet of\n merge_records: `bool`, default `True`\n This will keep only the most recent record exported. Sometimes there are updates and/or deletions and\n one should keep only the last records.\n columns: `list`, default `None`\n List of columns to fetch.\n callback: `callable`, default `None`\n Function to be called each downloaded file.\n max_workers: `int` default `30`\n Number of workers to use when downloading parquet files with pandas back-end.\n\n Returns: `pandas.DataFrame`\n DataFrame with the staging data.\n\n \"\"\"\n\n # number of workers to download in parallel\n max_workers=max_workers\n\n # if you want to download a few columns, [\"COLUMNS\", \"TO\", \"FETCH\"]\n col=columns\n\n # maximum records to fetch. P.S.: only works if `max_workers=None`\n max_hits=None \n\n # if metadata should be returned (mdmId, mdmLastUpdated, etc)\n return_metadata = True\n\n # if records with duplicated ids should be consolidated by pyCarol\n merge_records = merge_records\n\n #connector + staging table\n connector_name=connector_name\n staging = staging_name\n\n # file_pattern = '2021-02'\n file_pattern = None\n\n df = self.carol.staging.fetch_parquet(\n staging_name=staging, \n connector_name=connector_name, \n max_workers=max_workers, \n columns=col, \n merge_records=merge_records, \n return_metadata=return_metadata, \n max_hits=max_hits,\n callback=callback, file_pattern=file_pattern)\n\n return df\n\n\n def get_realtime_data(self, datamodel_name):\n \"\"\" Get records from a realtime datamodel\n\n Args:\n datamodel_name: ``str`\n Carol datamodel name\n\n Returns: `pandas.DataFrame`\n DataFrame with the realtime data.\n \"\"\"\n\n filter = {\n \"mustList\": [\n {\n \"mdmFilterType\": \"TYPE_FILTER\",\n \"mdmValue\": datamodel_name+\"Golden\" \n }\n ,\n {\n \"mdmFilterType\": \"TERM_FILTER\",\n \"mdmKey\":\"mdmMergePending\",\n \"mdmValue\": \"false\"\n },\n {\n \"mdmFilterType\": \"RANGE_FILTER\",\n \"mdmKey\": \"mdmCounterForEntity\",\n \"mdmValue\": [0,'null'],\n \"mdmValuesQuery\": {}\n }\n ]\n }\n\n result = self.carol.query(only_hits=True, page_size=1000, print_status=True).query(filter).go().results\n realtime = pd.DataFrame(result)\n\n return realtime\n\n def get_cds_data(self, datamodel_name, merge_records=True, columns = None, return_metadata = False, callback=None, max_workers=30):\n \"\"\"[summary]\n\n Args:\n datamodel_name: `str` optional\n Carol datamodel name\n merge_records: `bool` optional\n Merge cds data. Defaults to True.\n columns: `list of string` optional\n Datamodel's columns. Defaults to None (return all columns).\n return_metadata: `bool` optional \n Return Carol metadata columns. Defaults to False.\n callback: `function` optional\n Callback function to handle data. Defaults to None.\n max_workers: `int` optional\n Number of worker used to process. 
Defaults to 30.\n\n Returns: `pandas.DataFrame`\n DataFrame with the staging data.\n \"\"\"\n\n df = self.carol.datamodel.fetch_parquet(\n dm_name=datamodel_name, max_workers=max_workers,\n backend='pandas', return_dask_graph=False, columns=columns, merge_records=merge_records,\n return_metadata=return_metadata, max_hits=None, callback=callback, cds=True,\n file_pattern=None)\n\n return df\n\n def get_datamodel_relationship_constraints(self, dm_list=None):\n \"\"\"\n Create relationships between data models based on their relationship constraints\n\n Args:\n dm_list: `list of str`, default `None`\n Data model names to inspect. Defaults to all data models in the tenant.\n Returns: `defaultdict(list)`\n dictionary { \"dm\" : [{\"target_dm\" : \"source_target_field\"}] }\n \"\"\"\n\n # find relationship constraints\n if dm_list is None:\n dms = self.carol.datamodel.get_all().template_dict.keys()\n else:\n dms = dm_list\n relationship_constraints = defaultdict(list)\n for dm in dms:\n snap = self.carol.datamodel.get_by_name(dm)['mdmRelationshipConstraints']\n if snap:\n relationship_constraints[dm].append({c[\"mdmTargetEntityName\"]: c[\"mdmSourceTargetFieldName\"] for c in snap})\n return relationship_constraints\n\n def process_staging(self, stagings_list):\n \"\"\" Process a list of staging tables\n\n Args:\n stagings_list: `list of str`\n List of staging names\n \"\"\"\n\n for staging_name in stagings_list:\n print(f'adding process staging task to staging: {staging_name}')\n self.carol.cds_staging.process_data(staging_name, connector_name='protheus_carol', recursive_processing=False)\n print(f'see more in https://{self.carol.organization}.{self.carol.environment}/{self.carol.domain}/carol-ui/tasks')\n\n def get_carol_record_count(self):\n \"\"\" Get carol record count from tenant explore stats\n\n Returns:\n `dict`\n Dict with datamodel stats\n \"\"\"\n response = self.carol.call_api(path='v1/dashboard/exploreStatistics?days=3', method='GET')\n\n return response[\"exploreStats\"]\n"
] | [
[
"pandas.DataFrame"
]
] |
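For orientation, a hypothetical usage sketch of the CarolTechfin helper above. The tenant, credentials, connector, and staging names are placeholders, and the pycarol entry points (Carol, PwdAuth) are assumed from that library's documented login flow:

```python
from pycarol import Carol, PwdAuth  # assumed pycarol entry points

# Placeholder tenant and credentials; not real values.
carol = Carol(domain="my_tenant", app_name="my_app",
              auth=PwdAuth("user@example.com", "password"))
ct = CarolTechfin(carol)

# Fetch one staging table as pandas, keeping only the latest record versions.
df = ct.get_staging_data("se1", connector_name="protheus_carol",
                         merge_records=True)
print(df.shape)
```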
qifwa493/Camera_surface_area | [
"ebac18bd01ba7c615be63626aeb30bd9e07f53bb"
] | [
"versions/V1.2/Components/py_getContour.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Functions for finding a possible contour in the target image\n\nimport cv2\nimport numpy as np\n\n\ndef showImg(winName, mat, Width=None, Height=None):\n # Get image size\n if Width is None or Height is None:\n Height, Width = mat.shape[:2]\n\n # Display image\n cv2.namedWindow(winName, 0)\n cv2.resizeWindow(winName, Width, Height)\n cv2.imshow(winName, mat)\n\n\ndef findContours(Image, MinArcLength=30, Hull=False, Background=True):\n gray = cv2.cvtColor(Image, cv2.COLOR_BGR2GRAY)\n\n if Background:\n black = Image\n else:\n size = gray.shape[:2]\n black = np.zeros([size[0], size[1], 3], dtype=np.uint8)\n\n # Canny edge detection\n gray = cv2.bilateralFilter(gray, 9, 75, 75)\n meat = cv2.Canny(gray, 30, 60, L2gradient=True)\n # kernel = np.ones((7, 7), np.uint8)\n # meat = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)\n\n # Find contours\n # showImg('meat', meat, 900, 600)\n # cv2.waitKey(1)\n contours, hierarchy = cv2.findContours(meat, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n maxContour = contours[0]\n maxLength = 0\n count = 0\n filteredContours = []\n for c in contours:\n length = cv2.arcLength(c, True)\n\n\n if 7000 < length < 7500:\n maxContour = c\n\n\n # Find the long contour\n '''if length > MinArcLength:\n if length > maxLength:\n maxContour = c\n maxLength = length\n\n # Find all the contours that longer than the minimum arc length\n if length > MinArcLength:\n print('Contour ' + str(count) + ': ' + '{:.3f}'.format(length))\n print('Hierarchy: ' + str(hierarchy[0][count]))\n if Hull:\n c = cv2.convexHull(c)\n\n # Draw the contours\n temp = c[0]\n firstPoint = c[0]\n for point in c:\n cv2.line(black, (temp[0][0], temp[0][1]), (point[0][0], point[0][1]), (0, 0, 255), 3, lineType=cv2.LINE_AA)\n temp = point\n cv2.line(black, (temp[0][0], temp[0][1]), (firstPoint[0][0], firstPoint[0][1]), (0, 0, 255), 3, lineType=cv2.LINE_AA)\n\n # black = cv2.drawContours(black, hull, -1, (0, 0, 255), 3)\n\n showImg('temp', black)\n cv2.waitKey(0)\n\n count += 1\n # if count > 4:\n # break\n\n filteredContours.append(c)'''\n\n # Draw the contours\n print('Contour length: ' + '{:.3f}'.format(cv2.arcLength(maxContour, True)))\n temp = maxContour[0]\n firstPoint = maxContour[0]\n for i in range(len(maxContour)):\n point = maxContour[i]\n cv2.line(black, (temp[0][0], temp[0][1]), (point[0][0], point[0][1]), (255, 255, 255), 1)\n temp = point\n cv2.line(black, (temp[0][0], temp[0][1]), (firstPoint[0][0], firstPoint[0][1]), (255, 255, 255), 1)\n\n return black, maxContour\n\n\nif __name__ == '__main__':\n fileName = 'DSC_0631_after.jpg'\n image = cv2.imread(fileName)\n\n res, contours = findContours(image, Hull=False, MinArcLength=1000, Background=False)\n\n # cv2.imwrite(fileName.split('.')[0] + '_edges.jpg', res)\n showImg('res', res, 900, 600)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n"
] | [
[
"numpy.zeros"
]
] |
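The findContours helper above selects contours by comparing cv2.arcLength against length bounds. A self-contained sketch of the same filter on a synthetic image (the rectangle and the 100-pixel threshold are made-up values; the two-value findContours return assumes OpenCV 4.x):

```python
import cv2
import numpy as np

# Synthetic test image: a single filled rectangle on black.
img = np.zeros((200, 300), dtype=np.uint8)
cv2.rectangle(img, (50, 50), (250, 150), 255, thickness=-1)

edges = cv2.Canny(img, 30, 60, L2gradient=True)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# Keep only contours longer than a minimum arc length, as the helper does.
min_arc_length = 100
long_contours = [c for c in contours if cv2.arcLength(c, True) > min_arc_length]
print(len(long_contours), [cv2.arcLength(c, True) for c in long_contours])
```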
luoyan407/predict_trustworthiness_smallscale | [
"b7e1e2a68b0aee9b484228d1b5686f7252919e97"
] | [
"confidnet/models/segnet_selfconfid.py"
] | [
"import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom confidnet.models.model import AbstractModel\nfrom confidnet.models.segnet import segnetDown2, segnetDown3, segnetUp2, segnetUp3\n\n\nclass SegnetSelfConfid(AbstractModel):\n def __init__(self, config_args, device):\n super().__init__(config_args, device)\n self.in_channels = config_args[\"data\"][\"input_channels\"]\n self.n_classes = config_args[\"data\"][\"num_classes\"]\n self.is_unpooling = True\n self.dropout = config_args[\"model\"][\"is_dropout\"]\n\n self.down1 = segnetDown2(self.in_channels, 64)\n self.down2 = segnetDown2(64, 128)\n self.down3 = segnetDown3(128, 256)\n self.dropout_down3 = nn.Dropout(0.5)\n self.down4 = segnetDown3(256, 512)\n self.dropout_down4 = nn.Dropout(0.5)\n self.down5 = segnetDown3(512, 512)\n self.dropout_down5 = nn.Dropout(0.5)\n\n self.up5 = segnetUp3(512, 512)\n self.dropout_up5 = nn.Dropout(0.5)\n self.up4 = segnetUp3(512, 256)\n self.dropout_up4 = nn.Dropout(0.4)\n self.up3 = segnetUp3(256, 128)\n self.dropout_up3 = nn.Dropout(0.3)\n self.up2 = segnetUp2(128, 64)\n self.up1 = segnetUp2(64, self.n_classes)\n\n self.unpool_uncertainty = nn.MaxUnpool2d(2, 2)\n self.uncertainty1 = nn.Conv2d(64, 400, 3, 1, 1)\n self.uncertainty2 = nn.Conv2d(400, 120, 3, 1, 1)\n self.uncertainty3 = nn.Conv2d(120, 64, 3, 1, 1)\n self.uncertainty4 = nn.Conv2d(64, 64, 3, 1, 1)\n self.uncertainty5 = nn.Conv2d(64, 1, 3, 1, 1)\n\n def forward(self, inputs):\n\n down1, indices_1, unpool_shape1 = self.down1(inputs)\n down2, indices_2, unpool_shape2 = self.down2(down1)\n down3, indices_3, unpool_shape3 = self.down3(down2)\n if self.dropout:\n if self.mc_dropout:\n down3 = F.dropout(down3, 0.5, training=self.training)\n else:\n down3 = self.dropout_down3(down3)\n down4, indices_4, unpool_shape4 = self.down4(down3)\n if self.dropout:\n if self.mc_dropout:\n down4 = F.dropout(down4, 0.5, training=self.training)\n else:\n down4 = self.dropout_down3(down4)\n down5, indices_5, unpool_shape5 = self.down5(down4)\n if self.dropout:\n if self.mc_dropout:\n down5 = F.dropout(down5, 0.5, training=self.training)\n else:\n down5 = self.dropout_down3(down5)\n\n up5 = self.up5(down5, indices_5, unpool_shape5)\n if self.dropout:\n if self.mc_dropout:\n up5 = F.dropout(up5, 0.5, training=self.training)\n else:\n up5 = self.dropout_up5(up5)\n up4 = self.up4(up5, indices_4, unpool_shape4)\n if self.dropout:\n if self.mc_dropout:\n up4 = F.dropout(up4, 0.5, training=self.training)\n else:\n up4 = self.dropout_up4(up4)\n up3 = self.up3(up4, indices_3, unpool_shape3)\n if self.dropout:\n if self.mc_dropout:\n up3 = F.dropout(up3, 0.5, training=self.training)\n else:\n up3 = self.dropout_up3(up3)\n up2 = self.up2(up3, indices_2, unpool_shape2)\n up1 = self.up1(up2, indices_1, unpool_shape1)\n\n uncertainty = self.unpool_uncertainty(up2, indices_1, unpool_shape1)\n uncertainty = F.relu(self.uncertainty1(uncertainty))\n uncertainty = F.relu(self.uncertainty2(uncertainty))\n uncertainty = F.relu(self.uncertainty3(uncertainty))\n uncertainty = F.relu(self.uncertainty4(uncertainty))\n uncertainty = self.uncertainty5(uncertainty)\n\n return up1, uncertainty\n\n def print_summary(self, input_size):\n pass\n"
] | [
[
"torch.nn.MaxUnpool2d",
"torch.nn.Conv2d",
"torch.nn.Dropout",
"torch.nn.functional.dropout"
]
] |
ChitandaXu/ECG_classify | [
"bffd810dd7c0a03c18dfc58d3150c7b98b528105"
] | [
"ecg_classify/gen_data.py"
] | [
"import numpy as np\nimport os\nimport pandas as pd\nfrom ecg_classify.constants import DIM, heartbeat_factory, CLASS_NUM, TRAIN_SIZE, TEST_SIZE, LABEL_LIST\nfrom ecg_classify.gen_feature import gen_feature\n\n\ndef read_data(force=False):\n if (not (os.path.isfile('train.csv') and os.path.isfile('test.csv'))) or force:\n __write_data(True)\n __write_data(False)\n df_train = pd.read_csv('train.csv')\n df_test = pd.read_csv('test.csv')\n return df_train, df_test\n\n\ndef gen_data(symbol, is_training=True):\n heartbeat = heartbeat_factory(symbol, is_training)\n if is_training:\n num_list = list(heartbeat.keys())\n res = np.empty((4000, DIM), dtype='<U32')\n else:\n num_list = list(heartbeat.keys())\n res = np.empty((1000, DIM), dtype='<U32')\n cur = 0\n for num in num_list:\n feature = gen_feature(num)\n val = heartbeat[num]\n res[cur: cur + val] = feature[feature[:, -1] == symbol][0: val]\n cur = cur + val\n if symbol == 'A' or (symbol == '/' and is_training):\n half = res.shape[0] // 2\n res = res[0: half]\n res = np.concatenate([res, res])\n return res\n\n\ndef gen_label(is_training_set=True):\n if is_training_set:\n scale = TRAIN_SIZE\n else:\n scale = TEST_SIZE\n labels = np.zeros(scale * CLASS_NUM)\n for i in range(CLASS_NUM):\n labels[scale * i: scale * (i + 1)] = i\n return labels\n\n\ndef __write_data(is_training=True):\n if is_training:\n scale = TRAIN_SIZE\n else:\n scale = TEST_SIZE\n res = np.empty((scale * CLASS_NUM, DIM), dtype='<U32')\n for i in range(CLASS_NUM):\n res[scale * i: scale * (i + 1)] = gen_data(LABEL_LIST[i], is_training)\n df = pd.DataFrame(res)\n if is_training:\n df.to_csv(\"train.csv\", index=False)\n else:\n df.to_csv(\"test.csv\", index=False)\n"
] | [
[
"numpy.empty",
"numpy.zeros",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.concatenate"
]
] |
rodrigob/beam | [
"e2ce4037f85619f946b3d6a3a90955cdf1c19b4a"
] | [
"sdks/python/apache_beam/examples/complete/distribopt.py"
] | [
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"Example illustrating the use of Apache Beam for solving distributing\noptimization tasks.\n\nThis example solves an optimization problem which consists of distributing a\nnumber of crops to grow in several greenhouses. The decision where to grow the\ncrop has an impact on the production parameters associated with the greenhouse,\nwhich affects the total cost of production at the greenhouse. Additionally,\neach crop needs to be transported to a customer so the decision where to grow\nthe crop has an impact on the transportation costs as well.\n\nThis type of optimization problems are known as mixed-integer programs as they\nexist of discrete parameters (do we produce a crop in greenhouse A, B or C?)\nand continuous parameters (the greenhouse production parameters).\n\nRunning this example requires NumPy and SciPy. The input consists of a CSV file\nwith the following columns (Tx representing the transporation cost/unit if the\ncrop is produced in greenhouse x): Crop name, Quantity, Ta, Tb, Tc, ....\n\nExample input file with 5 crops and 3 greenhouses (a transporation cost of 0\nforbids production of the crop in a greenhouse):\nOP01,8,12,0,12\nOP02,30,14,3,12\nOP03,25,7,3,14\nOP04,87,7,2,2\nOP05,19,1,7,10\n\nThe pipeline consists of three phases:\n - Creating a grid of mappings (assignment of each crop to a greenhouse)\n - For each mapping and each greenhouse, optimization of the production\n parameters for cost, addition of the transporation costs, and aggregation\n of the costs for each mapping.\n - Selecting the mapping with the lowest cost.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\nimport logging\nimport string\nimport uuid\nfrom collections import defaultdict\n\nimport numpy as np\n\nimport apache_beam as beam\nfrom apache_beam import pvalue\nfrom apache_beam.options.pipeline_options import PipelineOptions\nfrom apache_beam.options.pipeline_options import SetupOptions\nfrom scipy.optimize import minimize\n\n\nclass Simulator(object):\n \"\"\"Greenhouse simulation for the optimization of greenhouse parameters.\"\"\"\n\n def __init__(self, quantities):\n super(Simulator, self).__init__()\n self.quantities = np.atleast_1d(quantities)\n\n self.A = np.array([[3.0, 10, 30],\n [0.1, 10, 35],\n [3.0, 10, 30],\n [0.1, 10, 35]])\n\n self.P = 1e-4 * np.array([[3689, 1170, 2673],\n [4699, 4387, 7470],\n [1091, 8732, 5547],\n [381, 5743, 8828]])\n\n a0 = np.array([[1.0, 1.2, 3.0, 3.2]])\n coeff = np.sum(np.cos(np.dot(a0.T, self.quantities[None, :])), axis=1)\n self.alpha = coeff / np.sum(coeff)\n\n def simulate(self, xc):\n # Map the input parameter to a cost for each crop.\n weighted_distance = np.sum(self.A * np.square(xc - self.P), axis=1)\n f = 
-np.sum(self.alpha * np.exp(-weighted_distance))\n return np.square(f) * np.log(self.quantities)\n\n\nclass CreateGrid(beam.PTransform):\n \"\"\"A transform for generating the mapping grid.\n\n Input: Formatted records of the input file, e.g.,\n {\n 'crop': 'OP009',\n 'quantity': 102,\n 'transport_costs': [('A', None), ('B', 3), ('C', 8)]\n }\n Output: tuple (mapping_identifier, {crop -> greenhouse})\n \"\"\"\n\n class PreGenerateMappings(beam.DoFn):\n \"\"\"ParDo implementation forming based on two elements a small sub grid.\n\n This facilitates parallellization of the grid generation.\n Emits two PCollections: the subgrid represented as collection of lists of\n two tuples, and a list of remaining records. Both serve as an input to\n GenerateMappings.\n \"\"\"\n\n def process(self, element):\n records = list(element[1])\n # Split of 2 crops and pre-generate the subgrid.\n # Select the crop with highest number of possible greenhouses:\n # in case two crops with only a single possible greenhouse were selected\n # the subgrid would consist of only 1 element.\n best_split = np.argsort([-len(r['transport_costs']) for r in records])[:2]\n rec1 = records[best_split[0]]\n rec2 = records[best_split[1]]\n\n # Generate & emit all combinations\n for a in rec1['transport_costs']:\n if a[1]:\n for b in rec2['transport_costs']:\n if b[1]:\n combination = [(rec1['crop'], a[0]), (rec2['crop'], b[0])]\n yield pvalue.TaggedOutput('splitted', combination)\n\n # Pass on remaining records\n remaining = [rec for i, rec in enumerate(records) if i not in best_split]\n yield pvalue.TaggedOutput('combine', remaining)\n\n class GenerateMappings(beam.DoFn):\n \"\"\"ParDo implementation to generate all possible mappings.\n\n Input: output of PreGenerateMappings\n Output: tuples of the form (mapping_identifier, {crop -> greenhouse})\n \"\"\"\n\n @staticmethod\n def _coordinates_to_greenhouse(coordinates, greenhouses, crops):\n # Map the grid coordinates back to greenhouse labels\n arr = []\n for coord in coordinates:\n arr.append(greenhouses[coord])\n return dict(zip(crops, np.array(arr)))\n\n def process(self, element, records):\n # Generate available greenhouses and grid coordinates for each crop.\n grid_coordinates = []\n for rec in records:\n # Get indices for available greenhouses (w.r.t crops)\n filtered = [i for i, av in enumerate(rec['transport_costs']) if av[1]]\n grid_coordinates.append(filtered)\n\n # Generate all mappings\n grid = np.vstack(list(map(np.ravel, np.meshgrid(*grid_coordinates)))).T\n crops = [rec['crop'] for rec in records]\n greenhouses = [rec[0] for rec in records[0]['transport_costs']]\n for point in grid:\n # translate back to greenhouse label\n mapping = self._coordinates_to_greenhouse(point, greenhouses, crops)\n assert all(rec[0] not in mapping for rec in element)\n # include the incomplete mapping of 2 crops\n mapping.update(element)\n # include identifier\n yield (uuid.uuid4().hex, mapping)\n\n def expand(self, records):\n o = (\n records\n | 'pair one' >> beam.Map(lambda x: (1, x))\n | 'group all records' >> beam.GroupByKey()\n | 'split one of' >> beam.ParDo(self.PreGenerateMappings())\n .with_outputs('splitted', 'combine')\n )\n\n # Create mappings, and prevent fusion (this limits the parallelization\n # in the optimization step)\n mappings = (\n o.splitted\n | 'create mappings' >> beam.ParDo(self.GenerateMappings(),\n pvalue.AsSingleton(o.combine))\n | 'prevent fusion' >> beam.Reshuffle()\n )\n\n return mappings\n\n\nclass OptimizeGrid(beam.PTransform):\n \"\"\"A transform for 
optimizing all greenhouses of the mapping grid.\"\"\"\n\n  class CreateOptimizationTasks(beam.DoFn):\n    \"\"\"\n    Create tasks for optimization.\n\n    Input: (mapping_identifier, {crop -> greenhouse})\n    Output: ((mapping_identifier, greenhouse), [(crop, quantity),...])\n    \"\"\"\n\n    def process(self, element, quantities):\n      mapping_identifier, mapping = element\n\n      # Create (crop, quantity) lists for each greenhouse\n      greenhouses = defaultdict(list)\n      for crop, greenhouse in mapping.items():\n        quantity = quantities[crop]\n        greenhouses[greenhouse].append((crop, quantity))\n\n      # Create input for OptimizeProductParameters\n      for greenhouse, crops in greenhouses.items():\n        key = (mapping_identifier, greenhouse)\n        yield (key, crops)\n\n  class OptimizeProductParameters(beam.DoFn):\n    \"\"\"Solve the optimization task to determine optimal production parameters.\n    Input: ((mapping_identifier, greenhouse), [(crop, quantity),...])\n    Two outputs:\n      - solution: (mapping_identifier, (greenhouse, [production parameters]))\n      - costs: (crop, greenhouse, mapping_identifier, cost)\n    \"\"\"\n\n    @staticmethod\n    def _optimize_production_parameters(sim):\n      # setup initial starting point & bounds\n      x0 = 0.5 * np.ones(3)\n      bounds = list(zip(np.zeros(3), np.ones(3)))\n\n      # Run L-BFGS-B optimizer\n      result = minimize(lambda x: np.sum(sim.simulate(x)), x0, bounds=bounds)\n      return result.x.tolist(), sim.simulate(result.x)\n\n    def process(self, element):\n      mapping_identifier, greenhouse = element[0]\n      crops, quantities = zip(*element[1])\n      sim = Simulator(quantities)\n      optimum, costs = self._optimize_production_parameters(sim)\n      solution = (mapping_identifier, (greenhouse, optimum))\n      yield pvalue.TaggedOutput('solution', solution)\n      for crop, cost, quantity in zip(crops, costs, quantities):\n        # use a fresh name so `costs` is not rebound inside the loop\n        cost_record = (crop, greenhouse, mapping_identifier, cost * quantity)\n        yield pvalue.TaggedOutput('costs', cost_record)\n\n  def expand(self, inputs):\n    mappings, quantities = inputs\n    opt = (\n        mappings\n        | 'optimization tasks' >> beam.ParDo(self.CreateOptimizationTasks(),\n                                             pvalue.AsDict(quantities))\n        | 'optimize' >> beam.ParDo(self.OptimizeProductParameters())\n        .with_outputs('costs', 'solution')\n    )\n    return opt\n\n\nclass CreateTransportData(beam.DoFn):\n  \"\"\"Transform records to pvalues ((crop, greenhouse), transport_cost)\"\"\"\n\n  def process(self, record):\n    crop = record['crop']\n    for greenhouse, transport_cost in record['transport_costs']:\n      yield ((crop, greenhouse), transport_cost)\n\n\ndef add_transport_costs(element, transport, quantities):\n  \"\"\"Adds the transport cost for the crop to the production cost.\n\n  elements are of the form (crop, greenhouse, mapping, cost), the cost only\n  corresponds to the production cost. 
Return the same format, but including\n the transport cost.\n \"\"\"\n crop = element[0]\n cost = element[3]\n # lookup & compute cost\n transport_key = element[:2]\n transport_cost = transport[transport_key] * quantities[crop]\n return element[:3] + (cost + transport_cost,)\n\n\ndef parse_input(line):\n # Process each line of the input file to a dict representing each crop\n # and the transport costs\n columns = line.split(',')\n\n # Assign each greenhouse a character\n transport_costs = []\n for greenhouse, cost in zip(string.ascii_uppercase, columns[2:]):\n info = (greenhouse, int(cost) if cost else None)\n transport_costs.append(info)\n\n return {\n 'crop': columns[0],\n 'quantity': int(columns[1]),\n 'transport_costs': transport_costs\n }\n\n\ndef format_output(element):\n \"\"\"Transforms the datastructure (unpack lists introduced by CoGroupByKey)\n before writing the result to file.\n \"\"\"\n result = element[1]\n result['cost'] = result['cost'][0]\n result['production'] = dict(result['production'])\n result['mapping'] = result['mapping'][0]\n return result\n\n\ndef run(argv=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('--input',\n dest='input',\n required=True,\n help='Input description to process.')\n parser.add_argument('--output',\n dest='output',\n required=True,\n help='Output file to write results to.')\n known_args, pipeline_args = parser.parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = True\n\n with beam.Pipeline(options=pipeline_options) as p:\n # Parse input file\n records = (\n p\n | 'read' >> beam.io.ReadFromText(known_args.input)\n | 'process input' >> beam.Map(parse_input)\n )\n\n # Create two pcollections, used as side inputs\n transport = (\n records\n | 'create transport' >> beam.ParDo(CreateTransportData())\n )\n\n quantities = (\n records\n | 'create quantities' >> beam.Map(lambda r: (r['crop'], r['quantity']))\n )\n\n # Generate all mappings and optimize greenhouse production parameters\n mappings = records | CreateGrid()\n opt = (mappings, quantities) | OptimizeGrid()\n\n # Then add the transport costs and sum costs per crop.\n costs = (\n opt.costs\n | 'include transport' >> beam.Map(add_transport_costs,\n pvalue.AsDict(transport),\n pvalue.AsDict(quantities))\n | 'drop crop and greenhouse' >> beam.Map(lambda x: (x[2], x[3]))\n | 'aggregate crops' >> beam.CombinePerKey(sum)\n )\n\n # Join cost, mapping and production settings solution on mapping identifier.\n # Then select best.\n join_operands = {\n 'cost': costs,\n 'production': opt.solution,\n 'mapping': mappings\n }\n best = (\n join_operands\n | 'join' >> beam.CoGroupByKey()\n | 'select best' >> beam.CombineGlobally(min, key=lambda x: x[1]['cost'])\n .without_defaults()\n | 'format output' >> beam.Map(format_output)\n )\n\n # pylint: disable=expression-not-assigned\n best | 'write optimum' >> beam.io.WriteToText(known_args.output)\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n run()\n"
] | [
[
"numpy.sum",
"numpy.ones",
"numpy.meshgrid",
"numpy.zeros",
"numpy.exp",
"numpy.atleast_1d",
"numpy.log",
"numpy.array",
"numpy.dot",
"numpy.square"
]
] |
grohalex/Final-Project | [
"41ac4e56e1a688a5f03f81d40d99eb2f839f9a26"
] | [
"Two-Way/stuck_lattice0.py"
] | [
"# first version of two way lattice stuck position heatmap\nimport numpy as np\nimport numpy.random as rd\nimport random as random\nimport scipy\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom os import path\nfrom datetime import datetime\nnow = datetime.now()\n\n#parameters\nN = 100 # number of sites\na1 = 1 # injection probability at lattice 1\na2 = 1 # injection probability at lattice 2\nb1 = 1 # removal probability at lattice 1\nb2 = 1 # removal probability at lattice 2\nk11 = 1 # steping probability for particle 1 in lattice 1\n#k12 = 0.1 # steping probability for particle 1 to lattice 2\n#k21 = 0 # steping probability for particle 2 to lattice 1\nk22 = 1 # steping probability for particle 2 in lattice 2\n\nk12_values = [round(i, 2) for i in np.linspace(0,1,20)]#np.linspace(0,1,11)\nk21_values = np.flip(k12_values)#[round(i, 2) for i in np.linspace(0,1,11)]\nstuck_steps_matrix = np.zeros([ len(k21_values), len(k12_values)]) #init our heat maps\nmax_step = 600\naverages = 50\n#init\nL1 = np.zeros(N) #initialize lattice 1\nL2 = np.zeros(N) #initialize lattice 2\nstep = 0 #init step variable\n#init the current measurement variables 'passed_particles'\npassed_particles1 = 0 #particles which passes 0th site in latice 1\npassed_particles2 = 0 #particles which passed 0th site in latice 2\ncurrent1 = 0\ncurrent2 = 0\n\ndensities1 = np.zeros(N) #average occupation density for each site in lattice 1\ndensities2 = np.zeros(N) #average occupation density for each site in lattice 2\n#densities_a1 = np.zeros(steps_a) # densities corresponding to different initial a\n\n#update ftion\ndef update(i):\n global passed_particles1,passed_particles2\n #insertion a1\n if i==0:\n if L1[0]==0 and rd.rand()<a1:\n L1[0]=1\n passed_particles1 +=1\n #in case there site = 1 there is particle 2 which should leave\n elif i==1 and L1[0]==2 and rd.rand()<b2: #in case there is particle 2 it leaves\n L1[0]=0\n\n #insertion a2\n elif i==N+1:\n if L2[0]==0 and rd.rand()<a2:\n L2[0]=2\n passed_particles2 +=1\n #in case site = 2N+1 and there is particle 1 it leaves\n elif i==N+2 and L2[0]==1 and rd.rand()<b2: #in case there is particle 1 it leaves\n L2[0]=0\n\n #removal b1\n elif i==N and rd.rand()<b1:\n if L1[-1]>0:\n L1[-1]=0\n\n #removal b2\n elif i==2*N+1 and rd.rand()<b2:\n if L2[-1]>0:\n L2[-1]=0\n\n\n #regular site lattice 1\n elif i>0 and i < N:\n i = i-1\n #update particle 1\n if L1[i]==1:\n #make a step\n if L1[i+1]==0 and rd.rand()<k11:\n L1[i]=0\n L1[i+1]=1\n\n #overtake\n elif L1[i+1]>0 and L2[-i-2]==0 and rd.rand()<k12:\n L1[i]=0\n L2[-i-2]=1\n\n #update particle 2\n if L1[i]==2:\n\n #finish overtaking\n if L2[-i]==0 and rd.rand()<k21:\n L1[i]=0\n L2[-i]=2\n if i == int(N/2): ###\n passed_particles1 +=1 ### #adding to the current in the second lattice (super coarse)\n\n\n #continue in the opposite lane\n elif L1[i-1]==0 and rd.rand()<k21:\n L1[i]=0\n L1[i-1]=2\n if i == int(N/2): ###\n passed_particles1 +=1 ### #adding to the current in the second lattice (super coarse)\n\n #regular site lattice 2\n elif i>N and i<2*N+1:\n i = i-N-2\n assert(i>=0 and i<=N)\n\n #update particle 2\n if L2[i]==2:\n #make a step\n if L2[i+1]==0 and rd.rand()<k22:\n L2[i]=0\n L2[i+1]=2\n\n #overtake\n elif L2[i+1]>0 and L1[-i-2]==0 and rd.rand()<k21:\n L2[i]=0\n L1[-i-2]=2\n\n #update particle 1\n if L2[i]==1:\n #finish overtaking\n if L1[-i]==0 and rd.rand()<k12:\n L2[i]=0\n L1[-i]=1\n if i == int(N/2): ###\n passed_particles2 +=1 ### #adding to the current in the second lattice (super coarse)\n\n\n #continue in 
the opposite lane\n            elif L2[i-1]==0 and rd.rand()<k12:\n                if not i==0:\n                    L2[i]=0\n                    L2[i-1]=1\n\n                if i == int(N/2): ###\n                    passed_particles2 +=1 ### #adding to the current in the second lattice (super coarse)\n\n\n#lets you update the lattice with optional parameters. This is useful in the Stuck_position()\ndef update_par(i, A1, A2, B1, B2, K11, K22, K12, K21):\n\n    #insertion a1\n    if i==0:\n        if L1[0]==0 and rd.rand()<A1:\n            L1[0]=1\n            return 1\n        else:\n            return 0\n    #in case site 1 holds particle 2, it should leave\n    elif i==1 and L1[0]==2 and rd.rand()<B2: #in case there is particle 2 it leaves\n        L1[0]=0\n        return 1\n    #insertion a2\n    elif i==N+1:\n        if L2[0]==0 and rd.rand()<A2:\n            L2[0]=2\n            return 1\n        else:\n            return 0\n    #in case site = 2N+1 and there is particle 1 it leaves\n    elif i==N+2 and L2[0]==1 and rd.rand()<B2: #in case there is particle 1 it leaves\n        L2[0]=0\n        return 1\n    #removal b1\n    elif i==N and rd.rand()<B1:\n        if L1[-1]>0:\n            L1[-1]=0\n            return 1\n        else:\n            return 0\n    #removal b2\n    elif i==2*N+1 and rd.rand()<B2:\n        if L2[-1]>0:\n            L2[-1]=0\n            return 1\n        else:\n            return 0\n\n    #regular site lattice 1\n    elif i>0 and i < N:\n        i = i-1\n\n        if L1[i]==0: #nothing can change with no particle available\n            return 0\n\n        #update particle 1\n        if L1[i]==1:\n            #make a step\n            if L1[i+1]==0 and rd.rand()<K11:\n                L1[i]=0\n                L1[i+1]=1\n                return 1\n\n            #overtake\n            elif L1[i+1]>0 and L2[-i-2]==0 and rd.rand()<K12:\n                L1[i]=0\n                L2[-i-2]=1\n                return 1\n            else:\n                return 0\n        #update particle 2\n        if L1[i]==2:\n\n            if L2[i]==0: #nothing can change with no particle available\n                return 0\n\n            #finish overtaking\n            if L2[-i]==0 and rd.rand()<K21:\n                L1[i]=0\n                L2[-i]=2\n                return 1\n\n            #continue in the opposite lane\n            elif L1[i-1]==0 and rd.rand()<K21:\n                L1[i]=0\n                L1[i-1]=2\n                return 1\n            else:\n                return 0\n\n    #regular site lattice 2\n    elif i>N:\n        i = i-N-2\n        assert(i>=0 and i<=N)\n\n        #update particle 2\n        if L2[i]==2:\n            #make a step\n            if L2[i+1]==0 and rd.rand()<K22:\n                L2[i]=0\n                L2[i+1]=2\n                return 1\n            #overtake\n            elif L2[i+1]>0 and L1[-i-2]==0 and rd.rand()<K21:\n                L2[i]=0\n                L1[-i-2]=2\n                return 1\n            else:\n                return 0\n        #update particle 1\n        elif L2[i]==1:\n            #finish overtaking\n            if L1[-i]==0 and rd.rand()<K12:\n                L2[i]=0\n                L1[-i]=1\n                return 1\n            #continue in the opposite lane\n            elif L2[i-1]==0 and rd.rand()<K12:\n                if not i==0:\n                    L2[i]=0\n                    L2[i-1]=1\n                    return 1\n                else:\n                    return 0\n            else:\n                return 0\n        else:\n            return 0\n    else:\n        return 0\n\n#does not update the lattice but it just outputs bool whether a move is possible. 
This is useful in the Stuck_position()\ndef update_bool(i, A1, A2, B1, B2, K11, K22, K12, K21):\n\n #insertion a1\n if i==0:\n if L1[0]==0 and rd.rand()<A1:\n #L1[0]=1\n return 1\n else:\n return 0\n #in case there site = 1 there is particle 2 which should leave\n elif i==1 and L1[0]==2 and rd.rand()<B2: #in case there is particle 2 it leaves\n #L1[0]=0\n return 1\n #insertion a2\n elif i==N+1:\n if L2[0]==0 and rd.rand()<A2:\n #L2[0]=2\n return 1\n else:\n return 0\n #in case site = 2N+1 and there is particle 1 it leaves\n elif i==N+2 and L2[0]==1 and rd.rand()<B2: #in case there is particle 1 it leaves\n #L2[0]=0\n return 1\n #removal b1\n elif i==N and rd.rand()<B1:\n if L1[-1]>0:\n #L1[-1]=0\n return 1\n else:\n return 0\n #removal b2\n elif i==2*N+1 and rd.rand()<B1:\n if L2[-1]>0:\n #L2[-1]=0\n return 1\n else:\n return 0\n\n #regular site lattice 1\n elif i>0 and i < N:\n i = i-1\n\n if L1[i]==0: #nothing can change with no particle available\n return 0\n\n #update particle 1\n if L1[i]==1:\n #make a step\n if L1[i+1]==0 and rd.rand()<K11:\n #L1[i]=0\n #L1[i+1]=1\n return 1\n\n #overtake\n elif L1[i+1]>0 and L2[-i-2]==0 and rd.rand()<K12:\n #L1[i]=0\n #L2[-i-2]=1\n\n return 1\n else:\n return 0\n #update particle 2\n if L1[i]==2:\n\n if L2[i]==0: #nothing can change with no particle available\n return 0\n\n #finish overtaking\n if L2[-i]==0 and rd.rand()<K21:\n #L1[i]=0\n #L2[-i]=2\n return 1\n\n #continue in the opposite lane\n elif L1[i-1]==0 and rd.rand()<K21:\n #L1[i]=0\n #L1[i-1]=2\n return 1\n else:\n return 0\n\n\n\n #regular site lattice 2\n elif i>N:\n i = i-N-2\n assert(i>=0 and i<=N)\n\n #update particle 2\n if L2[i]==2:\n #make a step\n if L2[i+1]==0 and rd.rand()<K22:\n #L2[i]=0\n #L2[i+1]=2\n return 1\n #overtake\n elif L2[i+1]>0 and L1[-i-2]==0 and rd.rand()<K21:\n #L2[i]=0\n #L1[-i-2]=2\n return 1\n else:\n return 0\n #update particle 1\n elif L2[i]==1:\n #finish overtaking\n if L1[-i]==0 and rd.rand()<K12:\n #L2[i]=0\n #L1[-i]=1\n return 1\n #continue in the opposite lane\n elif L2[i-1]==0 and rd.rand()<K12:\n if not i==0:\n #L2[i]=0\n #L2[i-1]=1\n return 1\n else:\n return 0\n else:\n return 0\n else:\n return 0\n\n#display both lattices, in the correct orientation\ndef Display():\n l1 = L1\n l2 = np.flip(L2) #L2 goes in the opposite direction\n\n print(l1)\n print(l2)\n\n#another way of displaying my two lattices\ndef DisplayNice():\n l1 = L1\n l2 = np.flip(L2) #L2 goes in the opposite direction\n\n for i in l1:\n print('|', end = '')\n if i==1:\n print('o>', end = '')\n if i==2:\n print('<o', end = '')\n elif i==0:\n print(' ', end = '')\n #print('|', end = '')\n print('|', end = '')\n print('\\n')\n\n for i in l2:\n print('|', end = '')\n if i==1:\n print('o>', end = '')\n if i==2:\n print('<o', end = '')\n elif i==0:\n print(' ', end = '')\n #print('|', end = '')\n print('|', end = '')\n print('\\n')\n\n#determines whether lattice is in a stuck position, returns a bool, True=lattice is stuck\ndef stuck_position():\n unstuck = 0\n for i in range(2*N+2):\n unstuck = unstuck + update_bool(i, 1,1,1,1,1,1,1,1)\n if unstuck > 0:\n return False\n if unstuck == 0:\n return True\n\n #this checks whether the update_par returns None (that would be an error)\n#while True:\n# site = rd.randint(0,2*N+2)\n# if update_par(site, 1,1,1,1,1,1,1,1)==None:\n# print('omg no')\n\n###########################################################################\n\nfor i in range(averages):\n #changing k12:\n for ii in range(len(k12_values)):\n k12 = k12_values[ii]\n print(k12)\n for jj in 
range(len(k21_values)):\n k21 = k21_values[jj]\n\n #I should empty the lattice here\n L1 = np.zeros(N) #initialize lattice 1\n L2 = np.zeros(N) #initialize lattice 2\n step = 0 #init step variable\n\n while not stuck_position() and not step > max_step:\n #print('yes')\n step += 1\n for j in range(2*N+2):\n site = rd.randint(0,2*N+2)\n update(site)\n print(\"step:\", step)\n stuck_steps_matrix[jj,ii] += step/averages\n\n\n#save the matrix into a txt\nName = \"stuck_heatmapN%s_moreAverages\"%(N)\nheading = \"parameters step: %s k12_values: %s \\n k21_values %s \\n\"% (len(k12_values), k12_values, k21_values)\ndata = stuck_steps_matrix\ndata = np.array(data)\n#data = np.transpose(data)\n#fmt = \"%-10d\", \"%-10.3f\", \"%-10.3f\"\nnp.savetxt(Name, data, fmt = \"%-10d\", delimiter = \"\\t\", header = heading)\n\n\n\n\n#heat map plot:\nf1 = plt.figure()\nplt.xticks(ticks=np.arange(len(k12_values)),labels=k12_values)\nplt.yticks(ticks=np.arange(len(k21_values)),labels=k21_values)\nplt.title(\"Stuck Lattice heatmap(number of sites = %s, resolution in a = %s, b = %s)\"%(N, len(k12_values), len(k21_values)))\nplt.ylabel(\"k21 value\")\nplt.xlabel(\"k12 value\")\n# save this plot inside a variable called hm\nhm=plt.imshow(stuck_steps_matrix, cmap='hot',interpolation=\"None\")\n# pass this heatmap object into plt.colorbar method.\nplt.colorbar(hm)\nplt.show()\n"
] | [
[
"numpy.zeros",
"numpy.savetxt",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.random.rand",
"numpy.flip",
"numpy.array",
"matplotlib.pyplot.colorbar",
"numpy.random.randint",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
yiruiliu110/eegnn | [
"253773c301681bb00b4789c34f48c82468ad16da"
] | [
"estimation/add_k.py"
] | [
"\"\"\"\nThis script is used to add a new cluster.\n\"\"\"\nimport torch\n\nfrom estimation.stirling_number import stirling_number\n\n\ndef build_injection(count, active_k, max_k, gamma):\n #print('count', count)\n with_sample_indices = count > 0\n\n remaining_indices = torch.squeeze(torch.cat([torch.tensor([True]), with_sample_indices[1::]], dim=0).nonzero())\n\n dict_tmp = {}\n index = 0\n for j in range(1, active_k):\n if with_sample_indices[j]:\n index += 1\n dict_tmp[j] = index\n\n old_active_K = index + 1\n add_number = stirling_number(count[0], gamma)\n\n if old_active_K + add_number <= max_k:\n new_active_K = old_active_K + add_number\n else:\n new_active_K = max_k\n\n def fn(x):\n if x == 0:\n return torch.randint(low=old_active_K, high=new_active_K, size=()).item()\n else:\n return dict_tmp[x]\n\n return fn, new_active_K, remaining_indices\n\n\ndef add_k(c, active_k, max_k, gamma):\n \"\"\"\n replace the cluster indictors of 0 to active_K+1\n :param c: a sparse matrix to indicate the cluster membership.\n :return: a new sparse matrix to indicate the cluster membership.\n \"\"\"\n indices = c._indices()\n values = c._values()\n\n values_one_hot = torch.nn.functional.one_hot(values, num_classes=active_k)\n count = torch.sum(values_one_hot, dim=0)\n\n fn, new_active_K, remaining_indices = build_injection(count, active_k, max_k, gamma)\n\n values = values.apply_(fn)\n\n c = torch.sparse_coo_tensor(indices, values, c.size())\n\n return c, new_active_K, remaining_indices\n\n\ndef switch(inputs, remaining_indices, max_k):\n remaining = torch.index_select(inputs, dim=0, index=remaining_indices)\n deleting_indices = generating_deleting_indices(max_k, remaining_indices)\n deleting = torch.index_select(inputs, dim=0, index=deleting_indices)\n outputs = torch.cat([remaining, deleting], dim=0)\n return outputs\n\n\ndef generating_deleting_indices(max_k, remaining_indices):\n deleting_indices = torch.tensor([int(item) for item in torch.arange(0, max_k) if item not in remaining_indices])\n return deleting_indices\n\n\nif __name__ == \"__main__\":\n\n i = [[0, 1, 1, 2],\n [2, 0, 2, 1]]\n v_c = [0, 1, 2, 0]\n active_K = 3\n c = torch.sparse_coo_tensor(i, v_c, (3, 3))\n\n c_new = add_k(c, active_K, max_k=10, gamma=1)\n print(c_new)"
] | [
[
"torch.sum",
"torch.randint",
"torch.sparse_coo_tensor",
"torch.tensor",
"torch.nn.functional.one_hot",
"torch.arange",
"torch.index_select",
"torch.cat"
]
] |
Lemswasabi/transformers | [
"1762ded30a49649bdd5f8f5ee38b46dea051026a"
] | [
"src/transformers/models/wavlm/modeling_wavlm.py"
] | [
"# coding=utf-8\n# Copyright 2021 The Fairseq Authors, Microsoft Research, and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch WavLM model.\"\"\"\n\nimport math\nimport warnings\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...activations import ACT2FN\nfrom ...deepspeed import is_deepspeed_zero3_enabled\nfrom ...modeling_outputs import (\n BaseModelOutput,\n CausalLMOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n Wav2Vec2BaseModelOutput,\n XVectorOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...pytorch_utils import torch_int_div\nfrom ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging\nfrom .configuration_wavlm import WavLMConfig\n\n\nlogger = logging.get_logger(__name__)\n\n\n_HIDDEN_STATES_START_POSITION = 2\n\n# General docstring\n_CONFIG_FOR_DOC = \"WavLMConfig\"\n_PROCESSOR_FOR_DOC = \"Wav2Vec2Processor\"\n\n# Base docstring\n_CHECKPOINT_FOR_DOC = \"patrickvonplaten/wavlm-libri-clean-100h-base-plus\"\n_EXPECTED_OUTPUT_SHAPE = [1, 292, 768]\n\n# CTC docstring\n_CTC_EXPECTED_OUTPUT = \"'mister quilter is the aposle of the middle classes and we are glad to welcome his gospel'\"\n_CTC_EXPECTED_LOSS = 12.51\n\n# Audio class docstring\n_FEAT_EXTRACTOR_FOR_DOC = \"Wav2Vec2FeatureExtractor\"\n_SEQ_CLASS_CHECKPOINT = \"hf-internal-testing/tiny-random-wavlm\"\n_SEQ_CLASS_EXPECTED_OUTPUT = \"'no'\" # TODO(anton) - could you quickly fine-tune a KS WavLM Model\n_SEQ_CLASS_EXPECTED_LOSS = 0.7 # TODO(anton) - could you quickly fine-tune a KS WavLM Model\n\n# Frame class docstring\n_FRAME_CLASS_CHECKPOINT = \"microsoft/wavlm-base-plus-sd\"\n_FRAME_EXPECTED_OUTPUT = [0, 0]\n\n# Speaker Verification docstring\n_XVECTOR_CHECKPOINT = \"microsoft/wavlm-base-plus-sv\"\n_XVECTOR_EXPECTED_OUTPUT = 0.97\n\nWAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"microsoft/wavlm-base\",\n \"microsoft/wavlm-base-plus\",\n \"microsoft/wavlm-large\",\n # See all WavLM models at https://huggingface.co/models?filter=wavlm\n]\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices\ndef _compute_mask_indices(\n shape: Tuple[int, int],\n mask_prob: float,\n mask_length: int,\n attention_mask: Optional[torch.LongTensor] = None,\n min_masks: int = 0,\n) -> np.ndarray:\n \"\"\"\n Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for\n ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on\n CPU as part of the preprocessing during training.\n\n Args:\n shape: The shape for which to compute masks. 
This should be a tuple of size 2 where\n            the first element is the batch size and the second element is the length of the axis to span.\n        mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of\n            independently generated mask spans of length `mask_length` is computed by\n            `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the\n            actual percentage will be smaller.\n        mask_length: size of the mask\n        min_masks: minimum number of masked spans\n        attention_mask: A (right-padded) attention mask which independently shortens the feature axis of\n            each batch dimension.\n    \"\"\"\n    batch_size, sequence_length = shape\n\n    if mask_length < 1:\n        raise ValueError(\"`mask_length` has to be bigger than 0.\")\n\n    if mask_length > sequence_length:\n        raise ValueError(\n            f\"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}\"\n            f\" and `sequence_length`: {sequence_length}`\"\n        )\n\n    # epsilon is used for probabilistic rounding\n    epsilon = np.random.rand(1).item()\n\n    def compute_num_masked_span(input_length):\n        \"\"\"Given input length, compute how many spans should be masked\"\"\"\n        num_masked_span = int(mask_prob * input_length / mask_length + epsilon)\n        num_masked_span = max(num_masked_span, min_masks)\n\n        # make sure num masked span <= sequence_length\n        if num_masked_span * mask_length > sequence_length:\n            num_masked_span = sequence_length // mask_length\n\n        # make sure num_masked span is also <= input_length - (mask_length - 1)\n        if input_length - (mask_length - 1) < num_masked_span:\n            num_masked_span = max(input_length - (mask_length - 1), 0)\n\n        return num_masked_span\n\n    # compute number of masked spans in batch\n    input_lengths = (\n        attention_mask.sum(-1).detach().tolist()\n        if attention_mask is not None\n        else [sequence_length for _ in range(batch_size)]\n    )\n\n    # SpecAugment mask to fill\n    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)\n    spec_aug_mask_idxs = []\n\n    max_num_masked_span = compute_num_masked_span(sequence_length)\n\n    if max_num_masked_span == 0:\n        return spec_aug_mask\n\n    for input_length in input_lengths:\n        # compute num of masked spans for this input\n        num_masked_span = compute_num_masked_span(input_length)\n\n        # get random indices to mask\n        spec_aug_mask_idx = np.random.choice(\n            np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False\n        )\n\n        # pick first sampled index that will serve as a dummy index to pad vector\n        # to ensure same dimension for all batches due to probabilistic rounding\n        # Picking first sample just pads those vectors twice.\n        if len(spec_aug_mask_idx) == 0:\n            # this case can only happen if `input_length` is strictly smaller than\n            # `sequence_length` in which case the last token has to be a padding\n            # token which we can use as a dummy mask id\n            dummy_mask_idx = sequence_length - 1\n        else:\n            dummy_mask_idx = spec_aug_mask_idx[0]\n\n        spec_aug_mask_idx = np.concatenate(\n            [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]\n        )\n        spec_aug_mask_idxs.append(spec_aug_mask_idx)\n\n    spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)\n\n    # expand masked indices to masked spans\n    spec_aug_mask_idxs = np.broadcast_to(\n        spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)\n    )\n    spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)\n\n    # add offset to the starting indexes so that the indexes 
now create a span\n offsets = np.arange(mask_length)[None, None, :]\n offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(\n batch_size, max_num_masked_span * mask_length\n )\n spec_aug_mask_idxs = spec_aug_mask_idxs + offsets\n\n # ensure that we cannot have indices larger than sequence_length\n if spec_aug_mask_idxs.max() > sequence_length - 1:\n spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1\n\n # scatter indices to mask\n np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)\n\n return spec_aug_mask\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->WavLM\nclass WavLMNoLayerNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->WavLM\nclass WavLMLayerNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n\n hidden_states = hidden_states.transpose(-2, -1)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = hidden_states.transpose(-2, -1)\n\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->WavLM\nclass WavLMGroupNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.activation = ACT2FN[config.feat_extract_activation]\n\n self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->WavLM\nclass WavLMPositionalConvEmbedding(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.conv = nn.Conv1d(\n config.hidden_size,\n config.hidden_size,\n kernel_size=config.num_conv_pos_embeddings,\n 
padding=config.num_conv_pos_embeddings // 2,\n groups=config.num_conv_pos_embedding_groups,\n )\n\n if is_deepspeed_zero3_enabled():\n import deepspeed\n\n with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):\n self.conv = nn.utils.weight_norm(self.conv, name=\"weight\", dim=2)\n deepspeed.zero.register_external_parameter(self, self.conv.weight_v)\n deepspeed.zero.register_external_parameter(self, self.conv.weight_g)\n else:\n self.conv = nn.utils.weight_norm(self.conv, name=\"weight\", dim=2)\n\n self.padding = WavLMSamePadLayer(config.num_conv_pos_embeddings)\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = hidden_states.transpose(1, 2)\n\n hidden_states = self.conv(hidden_states)\n hidden_states = self.padding(hidden_states)\n hidden_states = self.activation(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->WavLM\nclass WavLMSamePadLayer(nn.Module):\n def __init__(self, num_conv_pos_embeddings):\n super().__init__()\n self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0\n\n def forward(self, hidden_states):\n if self.num_pad_remove > 0:\n hidden_states = hidden_states[:, :, : -self.num_pad_remove]\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->WavLM\nclass WavLMFeatureEncoder(nn.Module):\n \"\"\"Construct the features from raw audio waveform\"\"\"\n\n def __init__(self, config):\n super().__init__()\n\n if config.feat_extract_norm == \"group\":\n conv_layers = [WavLMGroupNormConvLayer(config, layer_id=0)] + [\n WavLMNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)\n ]\n elif config.feat_extract_norm == \"layer\":\n conv_layers = [WavLMLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]\n else:\n raise ValueError(\n f\"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']\"\n )\n self.conv_layers = nn.ModuleList(conv_layers)\n self.gradient_checkpointing = False\n self._requires_grad = True\n\n def _freeze_parameters(self):\n for param in self.parameters():\n param.requires_grad = False\n self._requires_grad = False\n\n def forward(self, input_values):\n hidden_states = input_values[:, None]\n\n # make sure hidden_states require grad for gradient_checkpointing\n if self._requires_grad and self.training:\n hidden_states.requires_grad = True\n\n for conv_layer in self.conv_layers:\n if self._requires_grad and self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(conv_layer),\n hidden_states,\n )\n else:\n hidden_states = conv_layer(hidden_states)\n\n return hidden_states\n\n\nclass WavLMFeatureExtractor(WavLMFeatureEncoder):\n def __init__(self, config):\n super().__init__(config)\n warnings.warn(\n f\"The class `{self.__class__.__name__}` has been depreciated \"\n \"and will be removed in Transformers v5. 
\"\n f\"Use `{self.__class__.__bases__[0].__name__}` instead.\",\n FutureWarning,\n )\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->WavLM\nclass WavLMFeatureProjection(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)\n self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)\n self.dropout = nn.Dropout(config.feat_proj_dropout)\n\n def forward(self, hidden_states):\n # non-projected hidden states are needed for quantization\n norm_hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.projection(norm_hidden_states)\n hidden_states = self.dropout(hidden_states)\n return hidden_states, norm_hidden_states\n\n\nclass WavLMAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n num_buckets: int = 320,\n max_distance: int = 800,\n has_relative_position_bias: bool = True,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n\n if (self.head_dim * num_heads) != self.embed_dim:\n raise ValueError(\n f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}\"\n f\" and `num_heads`: {num_heads}).\"\n )\n self.scaling = self.head_dim**-0.5\n\n self.k_proj = nn.Linear(embed_dim, embed_dim)\n self.v_proj = nn.Linear(embed_dim, embed_dim)\n self.q_proj = nn.Linear(embed_dim, embed_dim)\n self.out_proj = nn.Linear(embed_dim, embed_dim)\n\n self.num_buckets = num_buckets\n self.max_distance = max_distance\n\n self.gru_rel_pos_const = nn.Parameter(torch.ones(1, self.num_heads, 1, 1))\n self.gru_rel_pos_linear = nn.Linear(self.head_dim, 8)\n\n if has_relative_position_bias:\n self.rel_attn_embed = nn.Embedding(self.num_buckets, self.num_heads)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_bias: Optional[torch.Tensor] = None,\n output_attentions: bool = False,\n index=0,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"Attention layer with relative attention\"\"\"\n bsz, tgt_len, _ = hidden_states.size()\n\n # first pass of attention layer creates position bias\n if position_bias is None:\n position_bias = self.compute_bias(tgt_len, tgt_len)\n position_bias = (\n position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, tgt_len)\n )\n\n # Compute relative position bias:\n # 1) get reshape hidden_states\n gated_hidden_states = hidden_states.view(hidden_states.shape[:-1] + (self.num_heads, -1))\n gated_hidden_states = gated_hidden_states.permute(0, 2, 1, 3)\n\n # 2) project hidden states\n relative_position_proj = self.gru_rel_pos_linear(gated_hidden_states)\n relative_position_proj = relative_position_proj.view(gated_hidden_states.shape[:-1] + (2, 4)).sum(-1)\n\n # 3) compute gate for position bias from projected hidden states\n gate_a, gate_b = torch.sigmoid(relative_position_proj).chunk(2, dim=-1)\n gate_output = gate_a * (gate_b * self.gru_rel_pos_const - 1.0) + 2.0\n\n # 4) apply gate to position bias to compute gated position_bias\n gated_position_bias = gate_output.view(bsz * self.num_heads, -1, 1) * position_bias\n gated_position_bias = gated_position_bias.view((-1, tgt_len, tgt_len))\n\n attn_output, attn_weights = 
self.torch_multi_head_self_attention(\n hidden_states, attention_mask, gated_position_bias, output_attentions\n )\n\n return attn_output, attn_weights, position_bias\n\n def torch_multi_head_self_attention(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Union[torch.LongTensor, torch.BoolTensor],\n gated_position_bias: torch.FloatTensor,\n output_attentions: bool,\n ) -> (torch.FloatTensor, torch.FloatTensor):\n \"\"\"simple wrapper around torch's multi_head_attention_forward function\"\"\"\n # self-attention assumes q = k = v\n query = key = value = hidden_states.transpose(0, 1)\n key_padding_mask = attention_mask.ne(1) if attention_mask is not None else None\n\n # disable bias and add_zero_attn\n bias_k = bias_v = None\n add_zero_attn = False\n\n # PyTorch 1.3.0 has F.multi_head_attention_forward defined\n # so no problem with backwards compatibility\n attn_output, attn_weights = F.multi_head_attention_forward(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),\n bias_k,\n bias_v,\n add_zero_attn,\n self.dropout,\n self.out_proj.weight,\n self.out_proj.bias,\n self.training,\n key_padding_mask,\n output_attentions,\n gated_position_bias,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n )\n\n # [Seq_Len, Batch Size, ...] -> [Batch Size, Seq_Len, ...]\n attn_output = attn_output.transpose(0, 1)\n\n if attn_weights is not None:\n # IMPORTANT: Attention weights are averaged weights\n # here which should not be the case. This is an open issue\n # on PyTorch: https://github.com/pytorch/pytorch/issues/32590\n attn_weights = attn_weights[:, None].broadcast_to(\n attn_weights.shape[:1] + (self.num_heads,) + attn_weights.shape[1:]\n )\n\n return attn_output, attn_weights\n\n def compute_bias(self, query_length: int, key_length: int) -> torch.FloatTensor:\n context_position = torch.arange(query_length, dtype=torch.long)[:, None]\n memory_position = torch.arange(key_length, dtype=torch.long)[None, :]\n relative_position = memory_position - context_position\n relative_position_bucket = self._relative_positions_bucket(relative_position)\n relative_position_bucket = relative_position_bucket.to(self.rel_attn_embed.weight.device)\n values = self.rel_attn_embed(relative_position_bucket)\n values = values.permute([2, 0, 1])\n return values\n\n def _relative_positions_bucket(self, relative_positions: torch.FloatTensor) -> torch.FloatTensor:\n num_buckets = self.num_buckets // 2\n\n relative_buckets = (relative_positions > 0).to(torch.long) * num_buckets\n relative_positions = torch.abs(relative_positions)\n\n max_exact = num_buckets // 2\n is_small = relative_positions < max_exact\n\n relative_positions_if_large = torch.log(relative_positions.float() / max_exact)\n relative_positions_if_large = relative_positions_if_large / math.log(self.max_distance / max_exact)\n relative_positions_if_large = relative_positions_if_large * (num_buckets - max_exact)\n relative_postion_if_large = (max_exact + relative_positions_if_large).to(torch.long)\n relative_postion_if_large = torch.min(\n relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1)\n )\n\n relative_buckets += torch.where(is_small, relative_positions, relative_postion_if_large)\n return relative_buckets\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->WavLM\nclass 
WavLMFeedForward(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.intermediate_dropout = nn.Dropout(config.activation_dropout)\n\n self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.output_dropout = nn.Dropout(config.hidden_dropout)\n\n def forward(self, hidden_states):\n hidden_states = self.intermediate_dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n hidden_states = self.intermediate_dropout(hidden_states)\n\n hidden_states = self.output_dense(hidden_states)\n hidden_states = self.output_dropout(hidden_states)\n return hidden_states\n\n\nclass WavLMEncoderLayer(nn.Module):\n def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True):\n super().__init__()\n self.attention = WavLMAttention(\n embed_dim=config.hidden_size,\n num_heads=config.num_attention_heads,\n dropout=config.attention_dropout,\n num_buckets=config.num_buckets,\n max_distance=config.max_bucket_distance,\n has_relative_position_bias=has_relative_position_bias,\n )\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.feed_forward = WavLMFeedForward(config)\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False, index=0):\n attn_residual = hidden_states\n hidden_states, attn_weights, position_bias = self.attention(\n hidden_states,\n attention_mask=attention_mask,\n position_bias=position_bias,\n output_attentions=output_attentions,\n index=index,\n )\n hidden_states = self.dropout(hidden_states)\n hidden_states = attn_residual + hidden_states\n\n hidden_states = self.layer_norm(hidden_states)\n\n hidden_states = hidden_states + self.feed_forward(hidden_states)\n hidden_states = self.final_layer_norm(hidden_states)\n\n outputs = (hidden_states, position_bias)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\nclass WavLMEncoderLayerStableLayerNorm(nn.Module):\n def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True):\n super().__init__()\n self.attention = WavLMAttention(\n embed_dim=config.hidden_size,\n num_heads=config.num_attention_heads,\n dropout=config.attention_dropout,\n num_buckets=config.num_buckets,\n max_distance=config.max_bucket_distance,\n has_relative_position_bias=has_relative_position_bias,\n )\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.feed_forward = WavLMFeedForward(config)\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False):\n attn_residual = hidden_states\n hidden_states = self.layer_norm(hidden_states)\n hidden_states, attn_weights, position_bias = self.attention(\n hidden_states,\n attention_mask=attention_mask,\n position_bias=position_bias,\n output_attentions=output_attentions,\n )\n hidden_states = self.dropout(hidden_states)\n hidden_states = attn_residual + hidden_states\n hidden_states = hidden_states + 
self.feed_forward(self.final_layer_norm(hidden_states))\n\n outputs = (hidden_states, position_bias)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\nclass WavLMEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.pos_conv_embed = WavLMPositionalConvEmbedding(config)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layers = nn.ModuleList(\n [WavLMEncoderLayer(config, has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers)]\n )\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n if attention_mask is not None:\n # make sure padded tokens output 0\n hidden_states[~attention_mask] = 0.0\n\n position_embeddings = self.pos_conv_embed(hidden_states)\n hidden_states = hidden_states + position_embeddings\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()\n position_bias = None\n\n for i, layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = np.random.uniform(0, 1)\n\n skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop)\n if not skip_the_layer or deepspeed_zero3_is_enabled:\n # under deepspeed zero3 all gpus must run in sync\n if self.gradient_checkpointing and self.training:\n # create gradient checkpointing function\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer),\n hidden_states,\n attention_mask,\n position_bias,\n )\n else:\n layer_outputs = layer(\n hidden_states,\n attention_mask=attention_mask,\n position_bias=position_bias,\n output_attentions=output_attentions,\n index=i,\n )\n\n hidden_states, position_bias = layer_outputs[:2]\n\n if skip_the_layer:\n layer_outputs = (None, None)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\nclass WavLMEncoderStableLayerNorm(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.pos_conv_embed = WavLMPositionalConvEmbedding(config)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layers = nn.ModuleList(\n [\n WavLMEncoderLayerStableLayerNorm(config, has_relative_position_bias=(i == 0))\n for i in range(config.num_hidden_layers)\n ]\n )\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n 
all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n if attention_mask is not None:\n # make sure padded tokens are not attended to\n hidden_states[~attention_mask] = 0\n\n position_embeddings = self.pos_conv_embed(hidden_states)\n hidden_states = hidden_states + position_embeddings\n hidden_states = self.dropout(hidden_states)\n\n deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()\n position_bias = None\n\n for i, layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = np.random.uniform(0, 1)\n\n skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop)\n if not skip_the_layer or deepspeed_zero3_is_enabled:\n # under deepspeed zero3 all gpus must run in sync\n # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication\n if self.gradient_checkpointing and self.training:\n # create gradient checkpointing function\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer),\n hidden_states,\n attention_mask,\n position_bias,\n )\n else:\n layer_outputs = layer(\n hidden_states,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n position_bias=position_bias,\n )\n hidden_states, position_bias = layer_outputs[:2]\n\n if skip_the_layer:\n layer_outputs = (None, None)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[2],)\n\n hidden_states = self.layer_norm(hidden_states)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions\n )\n\n\nclass WavLMGumbelVectorQuantizer(nn.Module):\n \"\"\"\n Vector quantization using gumbel softmax. 
See [CATEGORICAL REPARAMETERIZATION WITH\n GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.num_groups = config.num_codevector_groups\n self.num_vars = config.num_codevectors_per_group\n\n if config.codevector_dim % self.num_groups != 0:\n raise ValueError(\n f\"`config.codevector_dim {config.codevector_dim} must be divisible\"\n f\" by `config.num_codevector_groups` {self.num_groups} \"\n \"for concatenation.\"\n )\n\n # storage for codebook variables (codewords)\n self.codevectors = nn.Parameter(\n torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)\n )\n self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)\n\n # can be decayed for training\n self.temperature = 2\n\n @staticmethod\n def _compute_perplexity(probs):\n marginal_probs = probs.mean(dim=0)\n perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()\n return perplexity\n\n def forward(self, hidden_states):\n batch_size, sequence_length, hidden_size = hidden_states.shape\n\n # project to codevector dim\n hidden_states = self.weight_proj(hidden_states)\n hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)\n\n if self.training:\n # sample code vector probs via gumbel in differentiateable way\n codevector_probs = nn.functional.gumbel_softmax(hidden_states.float(), tau=self.temperature, hard=True)\n codevector_probs = codevector_probs.type_as(hidden_states)\n\n # compute perplexity\n codevector_soft_dist = torch.softmax(\n hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1\n )\n perplexity = self._compute_perplexity(codevector_soft_dist)\n else:\n # take argmax in non-differentiable way\n # comptute hard codevector distribution (one hot)\n codevector_idx = hidden_states.argmax(dim=-1)\n codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(\n -1, codevector_idx.view(-1, 1), 1.0\n )\n codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)\n\n perplexity = self._compute_perplexity(codevector_probs)\n\n codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)\n # use probs to retrieve codevectors\n codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors\n codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)\n codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)\n\n return codevectors, perplexity\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Adapter with Wav2Vec2->WavLM\nclass WavLMAdapter(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n # feature dim might need to be down-projected\n if config.output_hidden_size != config.hidden_size:\n self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)\n self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)\n else:\n self.proj = self.proj_layer_norm = None\n\n self.layers = nn.ModuleList(WavLMAdapterLayer(config) for _ in range(config.num_adapter_layers))\n self.layerdrop = config.layerdrop\n\n def forward(self, hidden_states):\n # down project hidden_states if necessary\n if self.proj is not None and self.proj_layer_norm is not None:\n hidden_states = self.proj(hidden_states)\n hidden_states = self.proj_layer_norm(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2)\n\n for 
layer in self.layers:\n layerdrop_prob = np.random.random()\n if not self.training or (layerdrop_prob > self.layerdrop):\n hidden_states = layer(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AdapterLayer with Wav2Vec2->WavLM\nclass WavLMAdapterLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.conv = nn.Conv1d(\n config.output_hidden_size,\n 2 * config.output_hidden_size,\n config.adapter_kernel_size,\n stride=config.adapter_stride,\n padding=1,\n )\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = nn.functional.glu(hidden_states, dim=1)\n\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PreTrainedModel with Wav2Vec2->WavLM, wav2vec2->wavlm\nclass WavLMPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = WavLMConfig\n base_model_prefix = \"wavlm\"\n main_input_name = \"input_values\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n supports_gradient_checkpointing = True\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n # gumbel softmax requires special init\n if isinstance(module, WavLMGumbelVectorQuantizer):\n module.weight_proj.weight.data.normal_(mean=0.0, std=1)\n module.weight_proj.bias.data.zero_()\n nn.init.uniform_(module.codevectors)\n elif isinstance(module, WavLMPositionalConvEmbedding):\n nn.init.normal_(\n module.conv.weight,\n mean=0,\n std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),\n )\n nn.init.constant_(module.conv.bias, 0)\n elif isinstance(module, WavLMFeatureProjection):\n k = math.sqrt(1 / module.projection.in_features)\n nn.init.uniform_(module.projection.weight, a=-k, b=k)\n nn.init.uniform_(module.projection.bias, a=-k, b=k)\n elif isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n elif isinstance(module, nn.Conv1d):\n nn.init.kaiming_normal_(module.weight)\n\n if module.bias is not None:\n k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))\n nn.init.uniform_(module.bias, a=-k, b=k)\n\n def _get_feat_extract_output_lengths(\n self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None\n ):\n \"\"\"\n Computes the output length of the convolutional layers\n \"\"\"\n\n add_adapter = self.config.add_adapter if add_adapter is None else add_adapter\n\n def _conv_out_length(input_length, kernel_size, stride):\n # 1D convolutional layer output length formula taken\n # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html\n return torch_int_div(input_length - kernel_size, stride) + 1\n\n for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):\n input_lengths = _conv_out_length(input_lengths, kernel_size, stride)\n\n if add_adapter:\n for _ in range(self.config.num_adapter_layers):\n input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)\n\n return input_lengths\n\n def _get_feature_vector_attention_mask(\n self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None\n 
):\n # Effectively attention_mask.sum(-1), but not inplace to be able to run\n # on inference mode.\n non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]\n\n output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)\n output_lengths = output_lengths.to(torch.long)\n\n batch_size = attention_mask.shape[0]\n\n attention_mask = torch.zeros(\n (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device\n )\n # these two operations makes sure that all values before the output lengths idxs are attended to\n attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1\n attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()\n return attention_mask\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (WavLMEncoder, WavLMEncoderStableLayerNorm, WavLMFeatureEncoder)):\n module.gradient_checkpointing = value\n\n\nWAVLM_START_DOCSTRING = r\"\"\"\n WavLM was proposed in [WavLM: Unified Speech Representation Learning with Labeled and Unlabeled\n Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei,\n Michael Zeng, Xuedong Huang.\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving etc.).\n\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`WavLMConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\n\nWAVLM_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file\n into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install\n soundfile*). To prepare the array into *input_values*, the [`WavLMProcessor`] should be used for padding\n and conversion into a tensor of type *torch.FloatTensor*. See [`WavLMProcessor.__call__`] for details.\n attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,\n 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n <Tip warning={true}>\n\n `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==\n True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should\n **not** be passed to avoid degraded performance when doing batched inference. For such models\n `input_values` should simply be padded with 0 and passed without `attention_mask`. 
Be aware that these\n models also yield slightly different results depending on whether `input_values` is padded or not.\n\n </Tip>\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare WavLM Model transformer outputting raw hidden-states without any specific head on top.\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM, WavLMBaseModelOutput->Wav2Vec2BaseModelOutput\nclass WavLMModel(WavLMPreTrainedModel):\n def __init__(self, config: WavLMConfig):\n super().__init__(config)\n self.config = config\n self.feature_extractor = WavLMFeatureEncoder(config)\n self.feature_projection = WavLMFeatureProjection(config)\n\n # model only needs masking vector if mask prob is > 0.0\n if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:\n self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())\n\n if config.do_stable_layer_norm:\n self.encoder = WavLMEncoderStableLayerNorm(config)\n else:\n self.encoder = WavLMEncoder(config)\n\n self.adapter = WavLMAdapter(config) if config.add_adapter else None\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameters will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.feature_extractor._freeze_parameters()\n\n def _mask_hidden_states(\n self,\n hidden_states: torch.FloatTensor,\n mask_time_indices: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n ):\n \"\"\"\n Masks extracted features along time axis and/or along feature axis according to\n [SpecAugment](https://arxiv.org/abs/1904.08779).\n \"\"\"\n\n # `config.apply_spec_augment` can set masking to False\n if not getattr(self.config, \"apply_spec_augment\", True):\n return hidden_states\n\n # generate indices & apply SpecAugment along time axis\n batch_size, sequence_length, hidden_size = hidden_states.size()\n\n if mask_time_indices is not None:\n # apply SpecAugment along time axis with given mask_time_indices\n hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)\n elif self.config.mask_time_prob > 0 and self.training:\n mask_time_indices = _compute_mask_indices(\n (batch_size, sequence_length),\n mask_prob=self.config.mask_time_prob,\n mask_length=self.config.mask_time_length,\n attention_mask=attention_mask,\n min_masks=self.config.mask_time_min_masks,\n )\n mask_time_indices = torch.tensor(mask_time_indices, 
device=hidden_states.device, dtype=torch.bool)\n hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)\n\n if self.config.mask_feature_prob > 0 and self.training:\n # generate indices & apply SpecAugment along feature axis\n mask_feature_indices = _compute_mask_indices(\n (batch_size, hidden_size),\n mask_prob=self.config.mask_feature_prob,\n mask_length=self.config.mask_feature_length,\n min_masks=self.config.mask_feature_min_masks,\n )\n mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)\n mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)\n hidden_states[mask_feature_indices] = 0\n\n return hidden_states\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_PROCESSOR_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=Wav2Vec2BaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_EXPECTED_OUTPUT_SHAPE,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n mask_time_indices: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n extract_features = self.feature_extractor(input_values)\n extract_features = extract_features.transpose(1, 2)\n\n if attention_mask is not None:\n # compute reduced attention_mask corresponding to feature vectors\n attention_mask = self._get_feature_vector_attention_mask(\n extract_features.shape[1], attention_mask, add_adapter=False\n )\n\n hidden_states, extract_features = self.feature_projection(extract_features)\n hidden_states = self._mask_hidden_states(\n hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask\n )\n\n encoder_outputs = self.encoder(\n hidden_states,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = encoder_outputs[0]\n\n if self.adapter is not None:\n hidden_states = self.adapter(hidden_states)\n\n if not return_dict:\n return (hidden_states, extract_features) + encoder_outputs[1:]\n\n return Wav2Vec2BaseModelOutput(\n last_hidden_state=hidden_states,\n extract_features=extract_features,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"WavLM Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\"\"\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM\nclass WavLMForCTC(WavLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.wavlm = WavLMModel(config)\n self.dropout = nn.Dropout(config.final_dropout)\n\n if config.vocab_size is None:\n raise ValueError(\n f\"You are trying to instantiate {self.__class__} with a configuration that \"\n \"does not define the vocabulary size of the language 
model head. Please \"\n \"instantiate the model as follows: `WavLMForCTC.from_pretrained(..., vocab_size=vocab_size)`. \"\n \"or define `vocab_size` of your model's configuration.\"\n )\n output_hidden_size = (\n config.output_hidden_size if hasattr(config, \"add_adapter\") and config.add_adapter else config.hidden_size\n )\n self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.wavlm.feature_extractor._freeze_parameters()\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_PROCESSOR_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=CausalLMOutput,\n config_class=_CONFIG_FOR_DOC,\n expected_output=_CTC_EXPECTED_OUTPUT,\n expected_loss=_CTC_EXPECTED_LOSS,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[torch.Tensor] = None,\n ) -> Union[Tuple, CausalLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):\n Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to\n the sequence length of the output logits. 
Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.\n All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,\n config.vocab_size - 1]`.\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.wavlm(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n hidden_states = self.dropout(hidden_states)\n\n logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n\n if labels.max() >= self.config.vocab_size:\n raise ValueError(f\"Label values must be <= vocab_size: {self.config.vocab_size}\")\n\n # retrieve loss input_lengths from attention_mask\n attention_mask = (\n attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)\n )\n input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)\n\n # assuming that padded tokens are filled with -100\n # when not being attended to\n labels_mask = labels >= 0\n target_lengths = labels_mask.sum(-1)\n flattened_targets = labels.masked_select(labels_mask)\n\n # ctc_loss doesn't support fp16\n log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)\n\n with torch.backends.cudnn.flags(enabled=False):\n loss = nn.functional.ctc_loss(\n log_probs,\n flattened_targets,\n input_lengths,\n target_lengths,\n blank=self.config.pad_token_id,\n reduction=self.config.ctc_loss_reduction,\n zero_infinity=self.config.ctc_zero_infinity,\n )\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions\n )\n\n\n@add_start_docstrings(\n \"\"\"\n WavLM Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n \"\"\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM\nclass WavLMForSequenceClassification(WavLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if hasattr(config, \"add_adapter\") and config.add_adapter:\n raise ValueError(\n \"Sequence classification does not support the use of WavLM adapters (config.add_adapter=True)\"\n )\n self.wavlm = WavLMModel(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)\n self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameters will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n 
\"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.wavlm.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. Only the classification head will be updated.\n \"\"\"\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_SEQ_CLASS_CHECKPOINT,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,\n expected_loss=_SEQ_CLASS_EXPECTED_LOSS,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[torch.Tensor] = None,\n ) -> Union[Tuple, SequenceClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.wavlm(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n hidden_states = self.projector(hidden_states)\n if attention_mask is None:\n pooled_output = hidden_states.mean(dim=1)\n else:\n padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)\n hidden_states[~padding_mask] = 0.0\n pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)\n\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n WavLM Model with a frame classification head on top for tasks like Speaker Diarization.\n \"\"\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM\nclass WavLMForAudioFrameClassification(WavLMPreTrainedModel):\n def __init__(self, config):\n 
super().__init__(config)\n\n if hasattr(config, \"add_adapter\") and config.add_adapter:\n raise ValueError(\n \"Audio frame classification does not support the use of WavLM adapters (config.add_adapter=True)\"\n )\n self.wavlm = WavLMModel(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.wavlm.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. Only the classification head will be updated.\n \"\"\"\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_FRAME_CLASS_CHECKPOINT,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_FRAME_EXPECTED_OUTPUT,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TokenClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.wavlm(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n logits = self.classifier(hidden_states)\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return output\n\n return TokenClassifierOutput(\n loss=None,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss\nclass AMSoftmaxLoss(nn.Module):\n def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):\n super(AMSoftmaxLoss, self).__init__()\n self.scale = scale\n self.margin = margin\n self.num_labels = num_labels\n self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)\n self.loss = nn.CrossEntropyLoss()\n\n def forward(self, hidden_states, labels):\n labels = labels.flatten()\n weight = nn.functional.normalize(self.weight, dim=0)\n hidden_states = nn.functional.normalize(hidden_states, dim=1)\n cos_theta = torch.mm(hidden_states, weight)\n psi = cos_theta - self.margin\n\n onehot = nn.functional.one_hot(labels, self.num_labels)\n logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)\n loss = self.loss(logits, labels)\n\n return loss\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer\nclass TDNNLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]\n self.out_conv_dim = config.tdnn_dim[layer_id]\n self.kernel_size = config.tdnn_kernel[layer_id]\n self.dilation = config.tdnn_dilation[layer_id]\n\n self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)\n self.activation = nn.ReLU()\n\n def forward(self, hidden_states):\n hidden_states = hidden_states.unsqueeze(1)\n hidden_states = nn.functional.unfold(\n hidden_states,\n (self.kernel_size, self.in_conv_dim),\n stride=(1, self.in_conv_dim),\n dilation=(self.dilation, 1),\n )\n hidden_states = hidden_states.transpose(1, 2)\n hidden_states = self.kernel(hidden_states)\n\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n@add_start_docstrings(\n \"\"\"\n WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification.\n \"\"\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM\nclass WavLMForXVector(WavLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.wavlm = WavLMModel(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n 
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])\n\n tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]\n self.tdnn = nn.ModuleList(tdnn_layers)\n\n self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)\n self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)\n\n self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)\n\n self.init_weights()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.wavlm.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. Only the classification head will be updated.\n \"\"\"\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):\n \"\"\"\n Computes the output length of the TDNN layers\n \"\"\"\n\n def _conv_out_length(input_length, kernel_size, stride):\n # 1D convolutional layer output length formula taken\n # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html\n return (input_length - kernel_size) // stride + 1\n\n for kernel_size in self.config.tdnn_kernel:\n input_lengths = _conv_out_length(input_lengths, kernel_size, 1)\n\n return input_lengths\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_XVECTOR_CHECKPOINT,\n output_type=XVectorOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_XVECTOR_EXPECTED_OUTPUT,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[torch.Tensor] = None,\n ) -> Union[Tuple, XVectorOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.wavlm(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n hidden_states = self.projector(hidden_states)\n\n for tdnn_layer in self.tdnn:\n hidden_states = tdnn_layer(hidden_states)\n\n # Statistic Pooling\n if attention_mask is None:\n mean_features = hidden_states.mean(dim=1)\n std_features = hidden_states.std(dim=1)\n else:\n feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))\n tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)\n mean_features = []\n std_features = []\n for i, length in enumerate(tdnn_output_lengths):\n mean_features.append(hidden_states[i, :length].mean(dim=0))\n std_features.append(hidden_states[i, :length].std(dim=0))\n mean_features = torch.stack(mean_features)\n std_features = torch.stack(std_features)\n statistic_pooling = torch.cat([mean_features, std_features], dim=-1)\n\n output_embeddings = self.feature_extractor(statistic_pooling)\n logits = self.classifier(output_embeddings)\n\n loss = None\n if labels is not None:\n loss = self.objective(logits, labels)\n\n if not return_dict:\n output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return XVectorOutput(\n loss=loss,\n logits=logits,\n embeddings=output_embeddings,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n"
] | [
[
"torch.empty",
"numpy.ones",
"torch.stack",
"torch.nn.functional.softmax",
"torch.mm",
"torch.nn.functional.one_hot",
"torch.log",
"torch.nn.ModuleList",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.init.kaiming_normal_",
"torch.nn.GroupNorm",
"torch.backends.cudnn.flags",
"torch.randn",
"torch.nn.LayerNorm",
"torch.nn.functional.glu",
"numpy.random.rand",
"torch.arange",
"torch.nn.functional.unfold",
"torch.sigmoid",
"numpy.random.uniform",
"torch.ones_like",
"torch.ones",
"torch.nn.functional.ctc_loss",
"numpy.zeros",
"torch.nn.functional.normalize",
"torch.nn.init.uniform_",
"torch.tensor",
"numpy.arange",
"torch.nn.Conv1d",
"numpy.broadcast_to",
"torch.nn.utils.weight_norm",
"numpy.array",
"torch.FloatTensor",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.nn.Embedding",
"torch.nn.CrossEntropyLoss",
"numpy.random.random",
"torch.where",
"torch.abs",
"torch.full_like",
"torch.zeros",
"torch.nn.ReLU",
"numpy.put_along_axis"
]
] |
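The WavLM row above flattens several reusable pieces; two of them are the x-vector head's statistic pooling (per-utterance mean/std over time) and the additive-margin softmax trained by `AMSoftmaxLoss`. The standalone sketch below replays just those two steps on toy tensors: `scale=30.0` and `margin=0.4` come from the code above, while the batch, time, and feature sizes are invented for illustration.

```python
import torch
import torch.nn.functional as F

batch, time, feat, num_labels = 4, 50, 32, 10
hidden_states = torch.randn(batch, time, feat)  # stand-in for TDNN outputs

# Statistic pooling: concatenate the per-utterance mean and std over time.
pooled = torch.cat([hidden_states.mean(dim=1), hidden_states.std(dim=1)], dim=-1)
assert pooled.shape == (batch, 2 * feat)

# Additive-margin softmax: subtract the margin from the target-class cosine
# before scaling, as in AMSoftmaxLoss.forward above.
scale, margin = 30.0, 0.4
weight = F.normalize(torch.randn(2 * feat, num_labels), dim=0)
embeddings = F.normalize(pooled, dim=1)
cos_theta = embeddings @ weight                    # [batch, num_labels], values in [-1, 1]
labels = torch.randint(num_labels, (batch,))
onehot = F.one_hot(labels, num_labels).bool()
logits = scale * torch.where(onehot, cos_theta - margin, cos_theta)
loss = F.cross_entropy(logits, labels)
print(loss.item())
```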
fac2003/perceiver-pytorch | [
"b07d5154c5dee63684c59f57d02a1b405701845f"
] | [
"tests/test_multimodality_with_text_perceiver.py"
] | [
"from torch.nn import Embedding\nimport pytest\n\nfrom fixtures import *\nfrom perceiver_pytorch.modalities import InputModalityWithEmbedding\nfrom perceiver_pytorch.multi_modality_with_text_perceiver import MultiModalityWithTextPerceiver\n\n\ndef test_embedding_for_layer(text_inputs):\n text_modality = InputModalityWithEmbedding(\n name='text',\n input_channels=1, # 1 channel for long ids representing tokens\n input_axis=1, # number of axes, 2 for images\n num_freq_bands=6, # number of freq bands, with original value (2 * K + 1)\n max_freq=8., # maximum frequency, hyperparameter depending on how fine the data is\n embedding=Embedding(32000, text_embedding_dim)\n )\n assert text_inputs.size() == (3, 512,1)\n embedded = text_modality.embedding(text_inputs)\n assert embedded.size()==(3, 512,1, 256)\n assert text_modality.embedding_for_layer(embedded=embedded.squeeze(2), layer_index=0, depth=4).size() == (3, 512, 256//4)\n\n\ndef test_multimodality_forward_image_text(image_inputs,\n text_inputs,\n targets):\n image_modality = InputModalityWithEmbedding(\n name='image',\n input_channels=3, # number of channels for each token of the input\n input_axis=2, # number of axes, 2 for images\n num_freq_bands=6, # number of freq bands, with original value (2 * K + 1)\n max_freq=4., # maximum frequency, hyperparameter depending on how fine the data is\n )\n text_modality = InputModalityWithEmbedding(\n name='text',\n input_channels=1, # 1 channel for long ids representing tokens\n input_axis=1, # number of axes, 2 for images\n num_freq_bands=6, # number of freq bands, with original value (2 * K + 1)\n max_freq=8., # maximum frequency, hyperparameter depending on how fine the data is\n embedding=Embedding(32000, text_embedding_dim)\n )\n model = MultiModalityWithTextPerceiver(\n modalities=(image_modality, text_modality),\n depth=depth, # depth of net\n num_latent_blocks_per_layer=2,\n num_latents=12,\n # number of latents, or induced set points, or centroids. different papers giving it different names\n latent_dim=64, # latent dimension\n cross_heads=1, # number of heads for cross attention. paper said 1\n latent_heads=8, # number of heads for latent self attention, 8\n cross_dim_head=64,\n latent_dim_head=64,\n num_classes=num_classes, # output number of classes\n attn_dropout=0.,\n ff_dropout=0.,\n weight_tie_layers=True,\n # whether to weight tie layers (optional, as indicated in the diagram)\n )\n result = model({'image': image_inputs,\n 'text': text_inputs})\n assert result is not None\n"
] | [
[
"torch.nn.Embedding"
]
] |
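The perceiver test above configures each `InputModalityWithEmbedding` with `num_freq_bands` and `max_freq`. A common way such Fourier position features are built (a hedged sketch; not necessarily the exact `perceiver_pytorch` implementation) is to project normalized coordinates onto a bank of sinusoids, which yields the `2 * K + 1` channels per axis that the test's comments mention:

```python
import math
import torch

def fourier_features(pos: torch.Tensor, num_bands: int, max_freq: float) -> torch.Tensor:
    """Encode coordinates in [-1, 1] as [pos, sin(f*pi*pos), cos(f*pi*pos)]."""
    freqs = torch.linspace(1.0, max_freq / 2.0, num_bands)  # frequency bank
    angles = pos * freqs * math.pi                          # broadcasts to [..., num_bands]
    return torch.cat([pos, angles.sin(), angles.cos()], dim=-1)

positions = torch.linspace(-1, 1, 512).unsqueeze(-1)  # one axis, e.g. 512 text tokens
encoded = fourier_features(positions, num_bands=6, max_freq=8.0)
print(encoded.shape)  # torch.Size([512, 13]), i.e. 2 * 6 + 1 channels per axis
```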
nicoroulet/thesis | [
"7b47a67b986a96633e9ee775ae96199a85995e01"
] | [
"src/Tools.py"
] | [
"\"\"\"Collection of Tools.\"\"\"\n\nimport numpy as np\nimport random\nimport os\n\ndef get_label_index(Y, bbox):\n x1, x2, y1, y2, z1, z2 = bbox\n Y_cropped = Y[x1:x2, y1:y2, z1:z2]\n labels = range(int(np.max(Y_cropped)) + 1)\n label_index = {}\n for label in labels:\n label_index[label] = np.argwhere(Y_cropped == label)\n return label_index\n\n\ndef get_voxel_of_rand_label(Y, bbox, label_index, ignore_bg=False):\n \"\"\"Random voxel from the given index, with balanced label probabilities.\n\n Args:\n Y (Numpy array): Image from which to pick the voxel.\n bbox (tuple): bounding box x1, x2, y1, y2, z1, z2 from which to\n sample the voxel.\n\n Returns:\n Numpy array: coordinates of the chosen voxel.\n\n \"\"\"\n labels = range(ignore_bg, int(np.max(Y)) + 1)\n x1, x2, y1, y2, z1, z2 = bbox\n Y_cropped = Y[x1:x2, y1:y2, z1:z2]\n while (True):\n label = np.random.choice(labels)\n try:\n voxel = random.choice(label_index[label])[:-1]\n return voxel + np.array([x1, y1, z1])\n except IndexError:\n pass\n\n\ndef get_bounding_box(X, patch_multiplicity):\n \"\"\"Get the bounding box of an image.\n\n The bounding box is the smallest box that contains all nonzero elements of\n the volume. The multiplicity defined by the generator is enforced by\n enlarging the box if needed.\n\n Args:\n X (numpy array): image volume from which to calculate the box\n patch_multiplicity (int): multiplicity enforced to dimensions of bounding box.\n\n Returns:\n tuple: xmin, xmax, ymin, ymax, zmin, zmax; 3D bounding box\n \"\"\"\n try:\n X = np.squeeze(X, axis=0)\n except ValueError:\n pass # axis 0 is not single-dimensional\n # Clear possible interpolation artifacts around actual brain.\n mask = X != bg_value\n # X = X * np.abs(X) > 0.0001\n out = []\n for ax in ((1, 2), (0, 2), (0, 1)):\n collapsed_mask = np.any(mask, axis=ax)\n\n vmin, vmax = np.where(collapsed_mask)[0][[0, -1]]\n max_size = collapsed_mask.shape[0]\n size = vmax - vmin\n # FIXME: if size % patch_multiplicity == 0, this adds innecesary size.\n new_size = size + (patch_multiplicity - size % patch_multiplicity)\n diff = new_size - size\n # Expand the box to enforce multiplicity, without exceeding the [0, max_size) interval.\n new_vmin = max(0, min(vmin - diff // 2, max_size - new_size))\n new_vmax = min(max_size, new_vmin + new_size)\n out.extend([new_vmin, new_vmax])\n return tuple(out)\n\n\ndef generate_cuboid_centered(cuboid_shape, volume_shape, center_voxel):\n \"\"\"Generate a cuboid to crop a patch, centered on a given voxel.\n\n Args:\n cuboid_shape (iterable): shape of returned cuboid.\n volume_shape (iterable): tuple width, height, depth. Volume that\n contains the returned cuboid.\n center_voxel (iterable): 3D point x, y, z that will be centered in\n the returned cuboid.\n\n Returns:\n tuple: cuboid (x1, x2, y1, y2, z1, z2) that contains `center_voxel`\n and is fully contained by `volume_shape`. The generated cuboid is,\n as much as possible, centered on `center_voxel`.\n\n \"\"\"\n x1, y1, z1 = v = np.minimum(np.maximum(0, np.array(center_voxel) -\n np.array(cuboid_shape, dtype='int') // 2),\n np.array(volume_shape) - cuboid_shape)\n x2, y2, z2 = v + cuboid_shape\n return x1, x2, y1, y2, z1, z2\n\n\ndef generate_cuboid_containing(cuboid_shape, volume_shape, contained_voxel):\n \"\"\"Generate a cuboid to crop a patch, containing a given voxel.\n\n Args:\n cuboid_shape (iterable): shape of returned cuboid.\n volume_shape (iterable): tuple width, height, depth. 
Volume that\n contains the returned cuboid.\n contained_voxel (iterable): 3D point x, y, z that will be contained in\n the returned cuboid.\n\n Returns:\n tuple: cuboid (x1, x2, y1, y2, z1, z2) that contains `contained_voxel`\n and is fully contained by `volume_shape`.\n\n \"\"\"\n cuboid_width, cuboid_height, cuboid_depth = cuboid_shape\n width, height, depth = volume_shape\n vx, vy, vz = contained_voxel\n x1 = np.random.randint(max(0, vx - cuboid_width),\n min(vx + 1, width - cuboid_width))\n y1 = np.random.randint(max(0, vy - cuboid_height),\n min(vy + 1, height - cuboid_height))\n z1 = np.random.randint(max(0, vz - cuboid_depth),\n min(vz + 1, depth - cuboid_depth))\n x2 = x1 + cuboid_width\n y2 = y1 + cuboid_height\n z2 = z1 + cuboid_depth\n return x1, x2, y1, y2, z1, z2\n\n\ndef filter_modalities(all_modalities, target_modalities, x):\n \"\"\"Filter channels from x based on the given modalities.\n\n Modalities are represented on the last dimension of `x` and are the different types of images\n (t1, t2, flair, etc.). This is used to feed a dataset with extra modalities to a net that has\n been trained on a subset of them.\n\n Args:\n all_modalities (list): modalities of x.\n target_modalities (list): desired modalities.\n x (numpy array): image or batch of images to filter.\n\n Returns:\n numpy array: filtered x\n\n \"\"\"\n # TODO: this is inefficient. Furthermore, it may be unnecessarily recomputed on repeated calls.\n target_indexes = [i for (i, modality) in enumerate(all_modalities)\n if modality in target_modalities]\n\n return x[..., target_indexes]\n\nbg_value = -4\n\n_model_subdir = ''\n\ndef set_model_subdir(subdir):\n global _model_subdir\n _model_subdir = subdir\n\ndef get_dataset_savedir(dataset, loss=None):\n \"\"\"Figure out savedir from a given dataset and loss function.\n\n Args:\n dataset (Dataset): the Dataset.\n loss (string or function, optional): Dataset loss. Default is\n `sparse_categorical_crossentropy`.\n\n \"\"\"\n savedir = '../models/%s/unet_%s' % (_model_subdir, dataset.name)\n if loss is not None and loss != 'sparse_categorical_crossentropy':\n savedir += '_' + (loss if isinstance(loss, str) else loss.__name__)\n return savedir\n\ndef ensure_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n"
] | [
[
"numpy.squeeze",
"numpy.argwhere",
"numpy.any",
"numpy.random.choice",
"numpy.max",
"numpy.array",
"numpy.where"
]
] |
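`get_bounding_box` in the Tools.py row above enlarges each axis of the brain's bounding box so its length becomes a multiple of `patch_multiplicity`, clamped to the volume. The self-contained sketch below replays that per-axis arithmetic on a synthetic 1D mask (the mask extent and multiplicity value are invented for illustration):

```python
import numpy as np

def expand_axis(collapsed_mask: np.ndarray, patch_multiplicity: int):
    # Nonzero extent along this axis, as in get_bounding_box above.
    vmin, vmax = np.where(collapsed_mask)[0][[0, -1]]
    max_size = collapsed_mask.shape[0]
    size = vmax - vmin
    # Round the length up to the next multiple of patch_multiplicity...
    new_size = size + (patch_multiplicity - size % patch_multiplicity)
    diff = new_size - size
    # ...and recenter without leaving the [0, max_size) interval.
    new_vmin = max(0, min(vmin - diff // 2, max_size - new_size))
    return new_vmin, min(max_size, new_vmin + new_size)

mask = np.zeros(100, dtype=bool)
mask[37:58] = True            # nonzero extent spans indices 37..57
print(expand_axis(mask, 16))  # (31, 63): widened to a length-32 box
```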
ACTCollaboration/tilec | [
"11ed8d027ad6ffac09b3e291a047f33e97673f14"
] | [
"bin/planck/verify_projection.py"
] | [
"from __future__ import print_function\nfrom orphics import maps,io,cosmology\nfrom pixell import enmap\nimport numpy as np\nimport os,sys\nfrom soapack import interfaces as sints\n\ndef get_coadd(imaps,wts,axis):\n # sum(w*m)/sum(w)\n twt = np.sum(wts,axis=axis)\n retmap = np.sum(wts*imaps,axis=axis)/twt\n retmap[~np.isfinite(retmap)] = 0\n return retmap,twt\n\ndef get_npol(array):\n if array=='545' or array=='857':return 1\n else: return 3\n\nmask = sints.get_act_mr3_crosslinked_mask('deep56',version='180323')\ndm = sints.PlanckHybrid(region=mask)\nbin_edges = np.arange(30,6000,40)\n\np1ds = {}\nfor array in dm.arrays:\n splits = dm.get_splits(array,srcfree=False)[0]\n ivars = dm.get_splits_ivar(array)[0]\n coadd,wt = get_coadd(splits,ivars,axis=0)\n npol = get_npol(array)\n for i in range(npol):\n cents,p1d = maps.binned_power(coadd[i],bin_edges=bin_edges,mask=mask)\n p1ds[array+str(i)] = p1d.copy()\n mivar = wt[i].mean()\n\n print(array,mivar)\n\nfor i in range(3):\n pl = io.Plotter(xyscale='linlog',xlabel='l',ylabel='C')\n for array in dm.arrays:\n npol = get_npol(array)\n if i<npol:\n pl.add(cents,p1ds[array+str(i)],label=array)\n pl.done(\"powers%d.png\" % i)\n"
] | [
[
"numpy.arange",
"numpy.sum",
"numpy.isfinite"
]
] |
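`get_coadd` in the script above forms an inverse-variance-weighted coadd of the Planck splits, `sum(w*m)/sum(w)` along the split axis, then zeroes pixels whose total weight vanishes. A tiny numpy replay on synthetic arrays (not real maps):

```python
import numpy as np

nsplits, ny, nx = 4, 8, 8
splits = np.random.normal(size=(nsplits, ny, nx))            # fake split maps
ivars = np.random.uniform(0.5, 2.0, size=(nsplits, ny, nx))  # fake inverse-variance weights
ivars[:, 0, 0] = 0.0  # one pixel with no valid data in any split

with np.errstate(divide="ignore", invalid="ignore"):
    twt = np.sum(ivars, axis=0)
    coadd = np.sum(ivars * splits, axis=0) / twt
coadd[~np.isfinite(coadd)] = 0  # same guard as in get_coadd
print(coadd.shape, coadd[0, 0])  # (8, 8) 0.0
```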
yuxuibbs/MCC-Competition-Docs | [
"384726c41434c5a07becb6438c3d2409c6ca6eb4"
] | [
"website/test.py"
] | [
"import pandas as pd\nimport numpy as np\nimport jellyfish\n\ndef create_heats(df, event, num_heats):\n counter = 0\n for row_num, registration_status in enumerate(df[event]):\n if registration_status != '0':\n df.loc[row_num, event] = counter % num_heats + 1\n counter += 1\n\n\nallEventsDict = {\"222\" : \"2x2 Cube\",\n \"333\" : \"Rubik's Cube\",\n \"333oh\" : \"Rubik's Cube: One-Handed\",\n \"333bf\" : \"Rubik's Cube: Blindfolded\",\n \"333fm\" : \"Rubik's Cube: Fewest moves\",\n \"333ft\" : \"Rubik's Cube: With feet\",\n \"333mbf\": \"Rubik's Cube: Multiple Blindfolded\",\n \"444\" : \"4x4 Cube\",\n \"444bf\" : \"4x4 Cube: Blindfolded\",\n \"555\" : \"5x5 Cube\",\n \"555bf\" : \"5x5 Cube: Blindfolded\",\n \"666\" : \"6x6 Cube\",\n \"777\" : \"7x7 Cube\",\n \"clock\" : \"Rubik's Clock\",\n \"minx\" : \"Megaminx\",\n \"pyram\" : \"Pyraminx\",\n \"skewb\" : \"Skewb\",\n \"sq1\" : \"Square-1\"}\n\n\ninput_file = '/home/yuxuan/CubeToaster/Heats/ImaginationStation.csv'\n\nnum_heats = {'222' : 4,\n '333' : 8,\n '333oh' : 2,\n '555' : 3,\n '666' : 2,\n 'minx' : 2\n }\n\ncomp_events = []\n\ndf = pd.read_csv(input_file, dtype=str, sep=',').drop(['Status', 'Country', 'Birth Date', 'Gender', 'Email', 'Guests', 'IP'], axis=1)\n\n\n# df = df.replace('0', np.NaN)\ndf['staff'] = 0\n\nfor event in allEventsDict:\n if event in df:\n comp_events.append(event)\n create_heats(df, event, num_heats[event])\n\ndf['FirstName'] = (df['Name'].str.split(expand=True)[0])\ndf['MRA'] = df['FirstName'].apply(jellyfish.match_rating_codex)\n\nprint(df.head(50))\n\nfor event in comp_events:\n grouped_df = df.groupby(event)\n for key, item in grouped_df:\n if key != '0':\n print(key)\n print(grouped_df.get_group(key)[['Name', event, 'MRA']].sort_values(by='MRA'))\n print()\n print()\n\ndf.to_csv('test.csv')"
] | [
[
"pandas.read_csv"
]
] |
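`create_heats` in the registration script above deals competitors into heats round-robin: the Nth registered competitor in an event lands in heat `N % num_heats + 1`, and unregistered rows (status `'0'`) are skipped. A toy run on made-up registrations:

```python
import pandas as pd

df = pd.DataFrame({"Name": list("ABCDEF"),
                   "333": ["1", "0", "1", "1", "1", "1"]})  # '0' = not registered

def create_heats(df: pd.DataFrame, event: str, num_heats: int) -> None:
    counter = 0
    for row_num, registration_status in enumerate(df[event]):
        if registration_status != "0":
            df.loc[row_num, event] = counter % num_heats + 1
            counter += 1

create_heats(df, "333", num_heats=2)
print(df)  # A->1, C->2, D->1, E->2, F->1; B keeps '0'
```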
hfurkanbozkurt/ludwig | [
"bfcbd52237c73702764e733ede4351e0146394bd"
] | [
"ludwig/features/category_feature.py"
] | [
"#! /usr/bin/env python\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport logging\nfrom typing import Any, Dict, List, Union\n\nimport numpy as np\nimport torch\n\nfrom ludwig.constants import (\n ACCURACY,\n CATEGORY,\n COLUMN,\n FILL_WITH_CONST,\n HIDDEN,\n HITS_AT_K,\n LOGITS,\n LOSS,\n MISSING_VALUE_STRATEGY_OPTIONS,\n NAME,\n PREDICTIONS,\n PROBABILITIES,\n PROBABILITY,\n PROC_COLUMN,\n PROJECTION_INPUT,\n SOFTMAX_CROSS_ENTROPY,\n SUM,\n TIED,\n TYPE,\n)\nfrom ludwig.features.base_feature import BaseFeatureMixin, InputFeature, OutputFeature, PredictModule\nfrom ludwig.utils import output_feature_utils\nfrom ludwig.utils.eval_utils import ConfusionMatrix\nfrom ludwig.utils.math_utils import int_type, softmax\nfrom ludwig.utils.misc_utils import set_default_value, set_default_values\nfrom ludwig.utils.strings_utils import create_vocabulary, UNKNOWN_SYMBOL\n\nlogger = logging.getLogger(__name__)\n\n\nclass _CategoryPreprocessing(torch.nn.Module):\n def __init__(self, metadata: Dict[str, Any]):\n super().__init__()\n self.str2idx = metadata[\"str2idx\"]\n self.unk = self.str2idx[UNKNOWN_SYMBOL]\n\n def forward(self, v: Union[List[str], torch.Tensor]):\n if isinstance(v, torch.Tensor):\n raise ValueError(f\"Unsupported input: {v}\")\n indices = [self.str2idx.get(s.strip(), self.unk) for s in v]\n return torch.tensor(indices, dtype=torch.int32)\n\n\nclass _CategoryPostprocessing(torch.nn.Module):\n def __init__(self, metadata: Dict[str, Any]):\n super().__init__()\n self.idx2str = {i: v for i, v in enumerate(metadata[\"idx2str\"])}\n self.predictions_key = PREDICTIONS\n self.probabilities_key = PROBABILITIES\n self.unk = \"\"\n\n def forward(self, preds: Dict[str, torch.Tensor]) -> Dict[str, Any]:\n predictions = preds[self.predictions_key]\n inv_preds = [self.idx2str.get(pred, self.unk) for pred in predictions]\n return {\n self.predictions_key: inv_preds,\n self.probabilities_key: preds[self.probabilities_key],\n }\n\n\nclass _CategoryPredict(PredictModule):\n def forward(self, inputs: Dict[str, torch.Tensor], feature_name: str) -> Dict[str, torch.Tensor]:\n logits = output_feature_utils.get_output_feature_tensor(inputs, feature_name, self.logits_key)\n probabilities = torch.softmax(logits, -1)\n predictions = torch.argmax(logits, -1)\n predictions = predictions.long()\n\n # EXPECTED SHAPE OF RETURNED TENSORS\n # predictions: [batch_size]\n # probabilities: [batch_size, num_classes]\n # logits: [batch_size, num_classes]\n return {self.predictions_key: predictions, self.probabilities_key: probabilities, self.logits_key: logits}\n\n\nclass CategoryFeatureMixin(BaseFeatureMixin):\n @staticmethod\n def type():\n return CATEGORY\n\n @staticmethod\n def preprocessing_defaults():\n return {\n \"most_common\": 10000,\n \"lowercase\": False,\n \"missing_value_strategy\": FILL_WITH_CONST,\n \"fill_value\": UNKNOWN_SYMBOL,\n }\n\n @staticmethod\n def 
preprocessing_schema():\n return {\n \"most_common\": {\"type\": \"integer\", \"minimum\": 0},\n \"lowercase\": {\"type\": \"boolean\"},\n \"missing_value_strategy\": {\"type\": \"string\", \"enum\": MISSING_VALUE_STRATEGY_OPTIONS},\n \"fill_value\": {\"type\": \"string\"},\n \"computed_fill_value\": {\"type\": \"string\"},\n }\n\n @staticmethod\n def cast_column(column, backend):\n return column\n\n @staticmethod\n def get_feature_meta(column, preprocessing_parameters, backend):\n column = column.astype(str)\n idx2str, str2idx, str2freq, _, _, _, _ = create_vocabulary(\n column,\n \"stripped\",\n num_most_frequent=preprocessing_parameters[\"most_common\"],\n lowercase=preprocessing_parameters[\"lowercase\"],\n add_special_symbols=False,\n processor=backend.df_engine,\n )\n return {\"idx2str\": idx2str, \"str2idx\": str2idx, \"str2freq\": str2freq, \"vocab_size\": len(str2idx)}\n\n @staticmethod\n def feature_data(column, metadata):\n return column.map(\n lambda x: (\n metadata[\"str2idx\"][x.strip()]\n if x.strip() in metadata[\"str2idx\"]\n else metadata[\"str2idx\"][UNKNOWN_SYMBOL]\n )\n ).astype(int_type(metadata[\"vocab_size\"]))\n\n @staticmethod\n def add_feature_data(\n feature_config, input_df, proc_df, metadata, preprocessing_parameters, backend, skip_save_processed_input\n ):\n proc_df[feature_config[PROC_COLUMN]] = CategoryFeatureMixin.feature_data(\n input_df[feature_config[COLUMN]].astype(str),\n metadata[feature_config[NAME]],\n )\n\n return proc_df\n\n\nclass CategoryInputFeature(CategoryFeatureMixin, InputFeature):\n encoder = \"dense\"\n\n def __init__(self, feature, encoder_obj=None):\n super().__init__(feature)\n self.overwrite_defaults(feature)\n if encoder_obj:\n self.encoder_obj = encoder_obj\n else:\n self.encoder_obj = self.initialize_encoder(feature)\n\n def forward(self, inputs):\n assert isinstance(inputs, torch.Tensor)\n assert (\n inputs.dtype == torch.int8\n or inputs.dtype == torch.int16\n or inputs.dtype == torch.int32\n or inputs.dtype == torch.int64\n )\n assert len(inputs.shape) == 1 or (len(inputs.shape) == 2 and inputs.shape[1] == 1)\n\n if len(inputs.shape) == 1:\n inputs = inputs.unsqueeze(dim=1)\n\n if inputs.dtype == torch.int8 or inputs.dtype == torch.int16:\n inputs = inputs.type(torch.int)\n encoder_output = self.encoder_obj(inputs)\n\n return {\"encoder_output\": encoder_output}\n\n @property\n def input_dtype(self):\n return torch.int32\n\n @property\n def input_shape(self) -> torch.Size:\n return torch.Size([1])\n\n @property\n def output_shape(self) -> torch.Size:\n return torch.Size(self.encoder_obj.output_shape)\n\n @staticmethod\n def update_config_with_metadata(input_feature, feature_metadata, *args, **kwargs):\n input_feature[\"vocab\"] = feature_metadata[\"idx2str\"]\n\n @staticmethod\n def populate_defaults(input_feature):\n set_default_value(input_feature, TIED, None)\n\n @staticmethod\n def create_preproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:\n return _CategoryPreprocessing(metadata)\n\n\nclass CategoryOutputFeature(CategoryFeatureMixin, OutputFeature):\n decoder = \"classifier\"\n loss = {TYPE: SOFTMAX_CROSS_ENTROPY}\n metric_functions = {LOSS: None, ACCURACY: None, HITS_AT_K: None}\n default_validation_metric = ACCURACY\n num_classes = 0\n top_k = 3\n\n def __init__(self, feature, output_features: Dict[str, OutputFeature]):\n super().__init__(feature, output_features)\n self.overwrite_defaults(feature)\n self.decoder_obj = self.initialize_decoder(feature)\n self._setup_loss()\n self._setup_metrics()\n\n def 
logits(self, inputs, **kwargs): # hidden\n hidden = inputs[HIDDEN]\n\n # EXPECTED SHAPES FOR RETURNED TENSORS\n # logits: shape [batch_size, num_classes]\n # hidden: shape [batch_size, size of final fully connected layer]\n return {LOGITS: self.decoder_obj(hidden), PROJECTION_INPUT: hidden}\n\n def create_predict_module(self) -> PredictModule:\n return _CategoryPredict()\n\n def get_prediction_set(self):\n return {PREDICTIONS, PROBABILITIES, LOGITS}\n\n @property\n def input_shape(self) -> torch.Size:\n return torch.Size([self.input_size])\n\n @classmethod\n def get_output_dtype(cls):\n return torch.int64\n\n @property\n def output_shape(self) -> torch.Size:\n return torch.Size([1])\n\n def metric_kwargs(self):\n return dict(top_k=self.top_k)\n\n @staticmethod\n def update_config_with_metadata(output_feature, feature_metadata, *args, **kwargs):\n output_feature[\"num_classes\"] = feature_metadata[\"vocab_size\"]\n output_feature[\"top_k\"] = min(output_feature[\"num_classes\"], output_feature[\"top_k\"])\n\n if isinstance(output_feature[LOSS][\"class_weights\"], (list, tuple)):\n if len(output_feature[LOSS][\"class_weights\"]) != output_feature[\"num_classes\"]:\n raise ValueError(\n \"The length of class_weights ({}) is not compatible with \"\n \"the number of classes ({}) for feature {}. \"\n \"Check the metadata JSON file to see the classes \"\n \"and their order and consider there needs to be a weight \"\n \"for the <UNK> class too.\".format(\n len(output_feature[LOSS][\"class_weights\"]),\n output_feature[\"num_classes\"],\n output_feature[COLUMN],\n )\n )\n\n if isinstance(output_feature[LOSS][\"class_weights\"], dict):\n if feature_metadata[\"str2idx\"].keys() != output_feature[LOSS][\"class_weights\"].keys():\n raise ValueError(\n \"The class_weights keys ({}) are not compatible with \"\n \"the classes ({}) of feature {}. \"\n \"Check the metadata JSON file to see the classes \"\n \"and consider there needs to be a weight \"\n \"for the <UNK> class too.\".format(\n output_feature[LOSS][\"class_weights\"].keys(),\n feature_metadata[\"str2idx\"].keys(),\n output_feature[COLUMN],\n )\n )\n else:\n class_weights = output_feature[LOSS][\"class_weights\"]\n idx2str = feature_metadata[\"idx2str\"]\n class_weights_list = [class_weights[s] for s in idx2str]\n output_feature[LOSS][\"class_weights\"] = class_weights_list\n\n if output_feature[LOSS][\"class_similarities_temperature\"] > 0:\n if \"class_similarities\" in output_feature[LOSS]:\n similarities = output_feature[LOSS][\"class_similarities\"]\n temperature = output_feature[LOSS][\"class_similarities_temperature\"]\n\n curr_row = 0\n first_row_length = 0\n is_first_row = True\n for row in similarities:\n if is_first_row:\n first_row_length = len(row)\n is_first_row = False\n curr_row += 1\n else:\n curr_row_length = len(row)\n if curr_row_length != first_row_length:\n raise ValueError(\n \"The length of row {} of the class_similarities \"\n \"of {} is {}, different from the length of \"\n \"the first row {}. 
All rows must have \"\n \"the same length.\".format(\n curr_row, output_feature[COLUMN], curr_row_length, first_row_length\n )\n )\n else:\n curr_row += 1\n all_rows_length = first_row_length\n\n if all_rows_length != len(similarities):\n raise ValueError(\n \"The class_similarities matrix of {} has \"\n \"{} rows and {} columns, \"\n \"their number must be identical.\".format(\n output_feature[COLUMN], len(similarities), all_rows_length\n )\n )\n\n if all_rows_length != output_feature[\"num_classes\"]:\n raise ValueError(\n \"The size of the class_similarities matrix of {} is \"\n \"{}, different from the number of classes ({}). \"\n \"Check the metadata JSON file to see the classes \"\n \"and their order and \"\n \"consider <UNK> class too.\".format(\n output_feature[COLUMN], all_rows_length, output_feature[\"num_classes\"]\n )\n )\n\n similarities = np.array(similarities, dtype=np.float32)\n for i in range(len(similarities)):\n similarities[i, :] = softmax(similarities[i, :], temperature=temperature)\n\n output_feature[LOSS][\"class_similarities\"] = similarities\n else:\n raise ValueError(\n \"class_similarities_temperature > 0, \"\n \"but no class_similarities are provided \"\n \"for feature {}\".format(output_feature[COLUMN])\n )\n\n @staticmethod\n def calculate_overall_stats(predictions, targets, train_set_metadata):\n overall_stats = {}\n confusion_matrix = ConfusionMatrix(targets, predictions[PREDICTIONS], labels=train_set_metadata[\"idx2str\"])\n overall_stats[\"confusion_matrix\"] = confusion_matrix.cm.tolist()\n overall_stats[\"overall_stats\"] = confusion_matrix.stats()\n overall_stats[\"per_class_stats\"] = confusion_matrix.per_class_stats()\n\n return overall_stats\n\n def postprocess_predictions(\n self,\n predictions,\n metadata,\n output_directory,\n backend,\n ):\n predictions_col = f\"{self.feature_name}_{PREDICTIONS}\"\n if predictions_col in predictions:\n if \"idx2str\" in metadata:\n predictions[predictions_col] = backend.df_engine.map_objects(\n predictions[predictions_col], lambda pred: metadata[\"idx2str\"][pred]\n )\n\n probabilities_col = f\"{self.feature_name}_{PROBABILITIES}\"\n if probabilities_col in predictions:\n prob_col = f\"{self.feature_name}_{PROBABILITY}\"\n predictions[prob_col] = predictions[probabilities_col].map(max)\n predictions[probabilities_col] = backend.df_engine.map_objects(\n predictions[probabilities_col], lambda pred: pred.tolist()\n )\n if \"idx2str\" in metadata:\n for i, label in enumerate(metadata[\"idx2str\"]):\n key = f\"{probabilities_col}_{label}\"\n\n # Use default param to force a capture before the loop completes, see:\n # https://stackoverflow.com/questions/2295290/what-do-lambda-function-closures-capture\n predictions[key] = backend.df_engine.map_objects(\n predictions[probabilities_col],\n lambda prob, i=i: prob[i],\n )\n\n top_k_col = f\"{self.feature_name}_predictions_top_k\"\n if top_k_col in predictions:\n if \"idx2str\" in metadata:\n predictions[top_k_col] = backend.df_engine.map_objects(\n predictions[top_k_col], lambda pred_top_k: [metadata[\"idx2str\"][pred] for pred in pred_top_k]\n )\n\n return predictions\n\n @staticmethod\n def populate_defaults(output_feature):\n # If Loss is not defined, set an empty dictionary\n set_default_value(output_feature, LOSS, {})\n\n # Populate the default values for LOSS if they aren't defined already\n set_default_values(\n output_feature[LOSS],\n {\n TYPE: \"softmax_cross_entropy\",\n \"labels_smoothing\": 0,\n \"class_weights\": 1,\n \"robust_lambda\": 0,\n 
\"confidence_penalty\": 0,\n \"class_similarities_temperature\": 0,\n \"weight\": 1,\n },\n )\n\n set_default_values(\n output_feature, {\"top_k\": 3, \"dependencies\": [], \"reduce_input\": SUM, \"reduce_dependencies\": SUM}\n )\n\n @staticmethod\n def create_postproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:\n return _CategoryPostprocessing(metadata)\n"
] | [
[
"torch.Size",
"torch.argmax",
"torch.tensor",
"numpy.array",
"torch.softmax"
]
] |
ZongSingHuang/Elite-Opposition-Based-Golden-Sine-Whale-Optimization-Algorithm | [
"468b89aaa9cae46b87ce9595cd76b5f97f6c8553"
] | [
"EGolden_SWOA.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 27 15:45:56 2020\n\n@author: ZongSing_NB\n\nMain reference:\nhttp://www.ejournal.org.cn/EN/10.3969/j.issn.0372-2112.2019.10.020\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass EGolden_SWOA():\n def __init__(self, fitness, D=30, P=20, G=500, ub=1, lb=0,\n b=1, a_max=2, a_min=0, a2_max=-1, a2_min=-2, l_max=1, l_min=-1):\n self.fitness = fitness\n self.D = D\n self.P = P\n self.G = G\n self.ub = ub\n self.lb = lb\n self.a_max = a_max\n self.a_min = a_min\n self.a2_max = a2_max\n self.a2_min = a2_min\n self.l_max = l_max\n self.l_min = l_min\n self.b = b\n \n self.gbest_X = np.zeros([self.D])\n self.gbest_F = np.inf\n self.loss_curve = np.zeros(self.G)\n \n def opt(self):\n # 初始化\n self.X = np.random.uniform(low=self.lb, high=self.ub, size=[self.P, self.D])\n tao = (np.sqrt(5)-1)/2\n x1 = -np.pi+(1-tao)\n x2 = -np.pi+tao*2*np.pi\n \n # 迭代\n for g in range(self.G):\n # OBL\n self.X, F = self.OBL()\n \n # 更新最佳解\n if np.min(F) < self.gbest_F:\n idx = F.argmin()\n self.gbest_X = self.X[idx].copy()\n self.gbest_F = F.min()\n \n # 收斂曲線\n self.loss_curve[g] = self.gbest_F\n \n # 更新\n a = self.a_max - (self.a_max-self.a_min)*(g/self.G)\n \n for i in range(self.P):\n p = np.random.uniform()\n r1 = np.random.uniform()\n r2 = np.random.uniform()\n A = 2*a*r1 - a\n C = 2*r2\n \n if np.abs(A)>=1:\n X_rand = self.X[np.random.randint(low=0, high=self.P, size=self.D), :]\n X_rand = np.diag(X_rand).copy()\n D = np.abs(C*X_rand - self.X[i, :])\n self.X[i, :] = X_rand - A*D # (4)\n else:\n if p<0.5:\n D = np.abs(C*self.gbest_X - self.X[i, :])\n self.X[i, :] = self.gbest_X - A*D # (1)\n else:\n r3 = 2*np.pi*np.random.uniform()\n r4 = np.pi*np.random.uniform()\n self.X[i, :] = self.X[i, :]*np.abs(np.sin(r3)) + \\\n r4*np.sin(r3)*np.abs(x1*self.gbest_X-x2*self.X[i, :]) # (9)\n \n # 邊界處理\n self.X = np.clip(self.X, self.lb, self.ub)\n \n \n def plot_curve(self):\n plt.figure()\n plt.title('loss curve ['+str(round(self.loss_curve[-1], 3))+']')\n plt.plot(self.loss_curve, label='loss')\n plt.grid()\n plt.legend()\n plt.show()\n\n def OBL(self):\n # 產生反向解\n k = np.random.uniform()\n alpha = self.X.min(axis=0)\n beta = self.X.max(axis=0)\n obl_X = k*(alpha+beta) - self.X # (5)\n \n # 對反向解進行邊界處理\n rand_X = np.random.uniform(low=alpha, high=beta, size=[self.P, self.D]) # (6)\n mask = np.logical_or(obl_X>self.ub, obl_X<self.lb)\n obl_X[mask] = rand_X[mask].copy()\n \n # 取得新解\n concat_X = np.vstack([obl_X, self.X])\n F = self.fitness(concat_X)\n top_idx = F.argsort()[:self.P]\n top_F = F[top_idx].copy()\n top_X = concat_X[top_idx].copy()\n \n return top_X, top_F\n\n"
] | [
[
"numpy.random.uniform",
"numpy.logical_or",
"numpy.vstack",
"matplotlib.pyplot.legend",
"numpy.sin",
"numpy.zeros",
"numpy.diag",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.grid",
"numpy.abs",
"matplotlib.pyplot.show",
"numpy.clip",
"numpy.min",
"numpy.sqrt",
"matplotlib.pyplot.plot",
"numpy.random.randint"
]
] |
qAp/kgl_deepfake | [
"d3ee36d704d82d5d72068ea16276a88b5746c8de"
] | [
"face_detection/lightDSFD/data/widerface.py"
] | [
"from __future__ import division , print_function\n\"\"\"WIDER Face Dataset Classes\nauthor: swordli\n\"\"\"\n#from .config import HOME\nimport os.path as osp\nimport sys\nimport torch\nimport torch.utils.data as data\nimport cv2\nimport numpy as np\nsys.path.append(\"/f/home/jianli/code/s3fd.180716/\")\n#from utils.augmentations import SSDAugmentation\nimport scipy.io\nimport pdb\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\n\nWIDERFace_CLASSES = ['face'] # always index 0\n# note: if you used our download scripts, this should be right\nWIDERFace_ROOT = \"/data/home/swordli/widerface_data/\"\n\n\nclass WIDERFaceAnnotationTransform(object):\n \"\"\"Transforms a WIDERFace annotation into a Tensor of bbox coords and label index\n Initilized with a dictionary lookup of classnames to indexes\n\n Arguments:\n class_to_ind (dict, optional): dictionary lookup of classnames -> indexes\n (default: alphabetic indexing of VOC's 20 classes)\n keep_difficult (bool, optional): keep difficult instances or not\n (default: False)\n height (int): height\n width (int): width\n \"\"\"\n\n def __init__(self, class_to_ind=None):\n self.class_to_ind = class_to_ind or dict(\n zip(WIDERFace_CLASSES, range(len(WIDERFace_CLASSES))))\n\n def __call__(self, target, width, height):\n \"\"\"\n Arguments:\n target (annotation) : the target annotation to be made usable\n will be an ET.Element\n Returns:\n a list containing lists of bounding boxes [bbox coords, class name]\n \"\"\"\n for i in range(len(target)):\n\n '''\n if target[i][0] < 2 : target[i][0] = 2\n if target[i][1] < 2 : target[i][1] = 2\n if target[i][2] > width-2 : target[i][2] = width - 2\n if target[i][3] > height-2 : target[i][3] = height - 2\n '''\n target[i][0] = float(target[i][0]) / width \n target[i][1] = float(target[i][1]) / height \n target[i][2] = float(target[i][2]) / width \n target[i][3] = float(target[i][3]) / height \n '''\n if target[i][0] < 0.0001:\n target[i][0] = 0.0001 \n if target[i][1] < 0.0001:\n target[i][1] = 0.0001 \n if target[i][2] > 0.9999:\n target[i][2] = 0.9999\n if target[i][3] > 0.9999:\n target[i][3] = 0.9999\n '''\n # filter error bbox\n \n #if target[i][0] >= target[i][2] or target[i][1] >= target[i][3] or target[i][0] < 0 or target[i][1] < 0 or target[i][2] > 1 or target[i][3] > 1 :\n # print (\"error bbox: \" , target[i])\n \n '''\n assert target[i][0] >= 0.001\n assert target[i][1] >= 0.001\n assert target[i][2] <= 0.999\n assert target[i][3] <= 0.999\n assert target[i][0] < target[i][2]\n assert target[i][1] < target[i][3]\n '''\n #res.append( [ target[i][0], target[i][1], target[i][2], target[i][3], target[i][4] ] )\n return target # [[xmin, ymin, xmax, ymax, label_ind], ... ]\n\nclass WIDERFaceDetection(data.Dataset):\n \"\"\"WIDERFace Detection Dataset Object \n http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/\n\n input is image, target is annotation\n\n Arguments:\n root (string): filepath to WIDERFace folder.\n image_set (string): imageset to use (eg. 
'train', 'val', 'test')\n transform (callable, optional): transformation to perform on the\n input image\n target_transform (callable, optional): transformation to perform on the\n target `annotation`\n (eg: take in caption string, return tensor of word indices)\n dataset_name (string, optional): which dataset to load\n (default: 'WIDERFace')\n \"\"\"\n\n def __init__(self, root,\n image_sets='train',\n transform=None, target_transform=WIDERFaceAnnotationTransform(),\n dataset_name='WIDER Face'):\n\n self.root = root\n self.image_set = image_sets\n self.transform = transform\n self.target_transform = target_transform\n self.name = dataset_name\n '''\n self._annopath = osp.join('%s', 'Annotations', '%s.xml')\n self._imgpath = osp.join('%s', 'JPEGImages', '%s.jpg')\n '''\n self.img_ids = list()\n self.label_ids = list()\n self.event_ids = list()\n '''\n for (year, name) in image_sets:\n rootpath = osp.join(self.root, 'VOC' + year)\n for line in open(osp.join(rootpath, 'ImageSets', 'Main', name + '.txt')):\n self.ids.append((rootpath, line.strip()))\n '''\n if self.image_set == 'train':\n path_to_label = osp.join ( self.root , 'wider_face_split' ) \n path_to_image = osp.join ( self.root , 'WIDER_train/images' )\n fname = \"wider_face_train.mat\"\n\n if self.image_set == 'val':\n path_to_label = osp.join ( self.root , 'wider_face_split' ) \n path_to_image = osp.join ( self.root , 'WIDER_val/images' )\n fname = \"wider_face_val.mat\"\n\n if self.image_set == 'test':\n path_to_label = osp.join ( self.root , 'wider_face_split' ) \n path_to_image = osp.join ( self.root , 'WIDER_test/images' )\n fname = \"wider_face_test.mat\"\n\n self.path_to_label = path_to_label\n self.path_to_image = path_to_image\n self.fname = fname\n self.f = scipy.io.loadmat(osp.join(self.path_to_label, self.fname))\n self.event_list = self.f.get('event_list')\n self.file_list = self.f.get('file_list')\n self.face_bbx_list = self.f.get('face_bbx_list')\n \n self._load_widerface()\n\n def _load_widerface(self):\n\n error_bbox = 0 \n train_bbox = 0\n for event_idx, event in enumerate(self.event_list):\n directory = event[0][0]\n for im_idx, im in enumerate(self.file_list[event_idx][0]):\n im_name = im[0][0]\n\n if self.image_set in [ 'test' , 'val']:\n self.img_ids.append( osp.join(self.path_to_image, directory, im_name + '.jpg') )\n self.event_ids.append( directory )\n self.label_ids.append([])\n continue\n\n face_bbx = self.face_bbx_list[event_idx][0][im_idx][0]\n bboxes = []\n for i in range(face_bbx.shape[0]):\n # filter bbox\n if face_bbx[i][2] < 2 or face_bbx[i][3] < 2 or face_bbx[i][0] < 0 or face_bbx[i][1] < 0:\n error_bbox +=1\n #print (face_bbx[i])\n continue \n train_bbox += 1 \n xmin = float(face_bbx[i][0])\n ymin = float(face_bbx[i][1])\n xmax = float(face_bbx[i][2]) + xmin -1 \t\n ymax = float(face_bbx[i][3]) + ymin -1\n bboxes.append([xmin, ymin, xmax, ymax, 0])\n\n if ( len(bboxes)==0 ): # filter bbox will make bbox none\n continue\n self.img_ids.append( osp.join(self.path_to_image, directory, im_name + '.jpg') )\n self.event_ids.append( directory )\n self.label_ids.append( bboxes )\n #yield DATA(os.path.join(self.path_to_image, directory, im_name + '.jpg'), bboxes)\n print(\"Error bbox number to filter : %d, bbox number: %d\" %(error_bbox , train_bbox))\n \n\n def __getitem__(self, index):\n im, gt, h, w = self.pull_item(index)\n return im, gt\n\n def __len__(self):\n return len(self.img_ids)\n\n def pull_item(self, index):\n\n target = self.label_ids[index]\n img = cv2.imread(self.img_ids[index])\n\n height, 
width, channels = img.shape\n if self.target_transform is not None:\n target = self.target_transform(target, width, height)\n\n if self.transform is not None:\n target = np.array(target)\n # data augmentation\n img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])\n #self.vis_detections_v2(img , boxes , index)\n # to rgb\n #img = img[:, :, (2, 1, 0)]\n # img = img.transpose(2, 0, 1)\n target = np.hstack((boxes, np.expand_dims(labels, axis=1)))\n\n return torch.from_numpy(img).permute(2, 0, 1), target, height, width\n # return torch.from_numpy(img), target, height, width\n\n def vis_detections(self , im, dets, image_name ):\n\n cv2.imwrite(\"./tmp_res/\"+str(image_name)+\"ori.jpg\" , im)\n print (im)\n size = im.shape[0]\n dets = dets*size\n \"\"\"Draw detected bounding boxes.\"\"\"\n class_name = 'face'\n #im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n\n for i in range(len(dets)):\n bbox = dets[i, :4]\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0] + 1,\n bbox[3] - bbox[1] + 1, fill=False,\n edgecolor='red', linewidth=2.5)\n )\n plt.axis('off')\n plt.tight_layout()\n plt.savefig('./tmp_res/'+str(image_name)+\".jpg\", dpi=fig.dpi)\n\n def vis_detections_v2(self , im, dets, image_name ):\n size = im.shape[0]\n dets = dets*size\n \"\"\"Draw detected bounding boxes.\"\"\"\n class_name = 'face'\n for i in range(len(dets)):\n bbox = dets[i, :4]\n #print ((bbox[0],bbox[1]), (bbox[2],bbox[3]) )\n cv2.rectangle( im , (int(bbox[0]),int(bbox[1])), (int(bbox[2]),int(bbox[3])), (0,255,0),5 )\n cv2.imwrite('./tmp_res/'+str(image_name)+\".jpg\", im)\n\n def pull_image(self, index):\n '''Returns the original image object at index in PIL form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n PIL img\n '''\n return cv2.imread(self.img_ids[index], cv2.IMREAD_COLOR)\n\n def pull_event(self, index):\n return self.event_ids[index]\n\n def pull_anno(self, index):\n '''Returns the original annotation of image at index\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to get annotation of\n Return:\n list: [img_id, [(label, bbox coords),...]]\n eg: ('001718', [('dog', (96, 13, 438, 332))])\n '''\n img_id = self.img_ids[index]\n anno = self.label_ids[index]\n gt = self.target_transform(anno, 1, 1)\n return img_id.split(\"/\")[-1], gt\n\n def pull_tensor(self, index):\n '''Returns the original image at an index in tensor form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n tensorized version of img, squeezed\n '''\n return torch.Tensor(self.pull_image(index)).unsqueeze_(0)\n\n'''\nfrom utils.augmentations import SSDAugmentation\nif __name__ == '__main__': \n dataset = WIDERFaceDetection( root=WIDERFace_ROOT, transform=SSDAugmentation(640,(104,117,123) ) )\n for i in range(10000):\n img, tar = dataset.pull_item(i)\n print (sta_w)\n'''\n"
] | [
[
"matplotlib.pyplot.axis",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"numpy.expand_dims",
"torch.from_numpy",
"numpy.array",
"matplotlib.pyplot.Rectangle"
]
] |
ccjoechou/tvm | [
"779dc51e1332f417fa4c304b595ce76891dfc33a"
] | [
"python/tvm/meta_schedule/cost_model/cost_model.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Meta Schedule CostModel.\"\"\"\nimport ctypes\nfrom typing import List\n\nimport numpy as np # type: ignore\nfrom tvm._ffi import register_object\nfrom tvm.runtime import Object\n\nfrom .. import _ffi_api\nfrom ..runner import RunnerResult\nfrom ..search_strategy import MeasureCandidate\nfrom ..tune_context import TuneContext\nfrom ..utils import _get_hex_address, check_override\n\n\n@register_object(\"meta_schedule.CostModel\")\nclass CostModel(Object):\n \"\"\"Cost model.\"\"\"\n\n def load(self, path: str) -> None:\n \"\"\"Load the cost model from given file location.\n\n Parameters\n ----------\n path : str\n The file path.\n \"\"\"\n _ffi_api.CostModelLoad(self, path) # type: ignore # pylint: disable=no-member\n\n def save(self, path: str) -> None:\n \"\"\"Save the cost model to given file location.\n\n Parameters\n ----------\n path : str\n The file path.\n \"\"\"\n _ffi_api.CostModelSave(self, path) # type: ignore # pylint: disable=no-member\n\n def update(\n self,\n context: TuneContext,\n candidates: List[MeasureCandidate],\n results: List[RunnerResult],\n ) -> None:\n \"\"\"Update the cost model given running results.\n\n Parameters\n ----------\n context : TuneContext,\n The tuning context.\n candidates : List[MeasureCandidate]\n The measure candidates.\n results : List[RunnerResult]\n The running results of the measure candidates.\n \"\"\"\n _ffi_api.CostModelUpdate(self, context, candidates, results) # type: ignore # pylint: disable=no-member\n\n def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray:\n \"\"\"Update the cost model given running results.\n\n Parameters\n ----------\n context : TuneContext,\n The tuning context.\n candidates : List[MeasureCandidate]\n The measure candidates.\n\n Return\n ------\n result : np.ndarray\n The predicted normalized score.\n \"\"\"\n n = len(candidates)\n results = np.zeros(shape=(n,), dtype=\"float64\")\n _ffi_api.CostModelPredict( # type: ignore # pylint: disable=no-member\n self,\n context,\n candidates,\n results.ctypes.data_as(ctypes.c_void_p),\n )\n return results\n\n\n@register_object(\"meta_schedule.PyCostModel\")\nclass PyCostModel(CostModel):\n \"\"\"An abstract CostModel with customized methods on the python-side.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n\n @check_override(self.__class__, CostModel)\n def f_load(path: str) -> None:\n self.load(path)\n\n @check_override(self.__class__, CostModel)\n def f_save(path: str) -> None:\n self.save(path)\n\n @check_override(self.__class__, CostModel)\n def f_update(\n context: TuneContext,\n candidates: List[MeasureCandidate],\n results: List[RunnerResult],\n ) -> None:\n self.update(context, candidates, results)\n\n 
@check_override(self.__class__, CostModel)\n def f_predict(context: TuneContext, candidates: List[MeasureCandidate], return_ptr) -> None:\n n = len(candidates)\n return_ptr = ctypes.cast(return_ptr, ctypes.POINTER(ctypes.c_double))\n array_wrapper = np.ctypeslib.as_array(return_ptr, shape=(n,))\n array_wrapper[:] = self.predict(context, candidates)\n assert (\n array_wrapper.dtype == \"float64\"\n ), \"ValueError: Invalid data type returned from CostModel Predict!\"\n\n def f_as_string() -> str:\n return str(self)\n\n self.__init_handle_by_constructor__(\n _ffi_api.CostModelPyCostModel, # type: ignore # pylint: disable=no-member\n f_load,\n f_save,\n f_update,\n f_predict,\n f_as_string,\n )\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}({_get_hex_address(self.handle)})\"\n"
] | [
[
"numpy.ctypeslib.as_array",
"numpy.zeros"
]
] |
JeroenDM/acrobotics | [
"d734ca25f40015e5c5ff019402a83504783c13cd"
] | [
"tests/test_link.py"
] | [
"from acrobotics.link import Link, LinkKinematics, DHLink, JointType\n\nimport numpy as np\nimport casadi as ca\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\nfrom acrobotics.geometry import Scene\nfrom acrobotics.shapes import Box\nfrom numpy.testing import assert_almost_equal\n\n\ndef DenHarMat(theta, alpha, a, d):\n \"\"\" Use code from someone else to compare with:\n https://stackoverflow.com/questions/17891024/forward-kinematics-data-modeling\n \"\"\"\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n cos_alpha = np.cos(alpha)\n sin_alpha = np.sin(alpha)\n\n return np.array(\n [\n [cos_theta, -sin_theta * cos_alpha, sin_theta * sin_alpha, a * cos_theta],\n [sin_theta, cos_theta * cos_alpha, -cos_theta * sin_alpha, a * sin_theta],\n [0, sin_alpha, cos_alpha, d],\n [0, 0, 0, 1],\n ]\n )\n\n\nclass TestLinkKinematics:\n def test_init(self):\n dh_params = DHLink(0.1, np.pi / 4, -0.1, np.pi / 6)\n link1 = LinkKinematics(dh_params, JointType.revolute)\n link2 = LinkKinematics(dh_params, JointType.prismatic)\n\n assert link1.joint_type == JointType.revolute\n assert link2.joint_type == JointType.prismatic\n assert link1.dh == dh_params\n assert link2.dh == dh_params\n\n def test_dh_matrix(self):\n dh_params = DHLink(0.1, np.pi / 4, -0.1, np.pi / 6)\n link1 = LinkKinematics(dh_params, JointType.revolute)\n link2 = LinkKinematics(dh_params, JointType.prismatic)\n\n q1 = 1.2\n T1 = link1.get_link_relative_transform(q1)\n T1_desired = DenHarMat(q1, dh_params.alpha, dh_params.a, dh_params.d)\n assert_almost_equal(T1, T1_desired)\n\n d2 = 0.75\n T2 = link2.get_link_relative_transform(d2)\n T2_desired = DenHarMat(dh_params.theta, dh_params.alpha, dh_params.a, d2)\n assert_almost_equal(T2, T2_desired)\n\n def test_dh_matrix_casadi(self):\n dh_params = DHLink(0.1, np.pi / 4, -0.1, np.pi / 6)\n link1 = LinkKinematics(dh_params, JointType.revolute)\n link2 = LinkKinematics(dh_params, JointType.prismatic)\n\n opti = ca.Opti()\n\n q1 = opti.variable()\n T1 = ca.Function(\"T1\", [q1], [link1.get_link_relative_transform_casadi(q1)])\n T1_desired = DenHarMat(1.2, dh_params.alpha, dh_params.a, dh_params.d)\n assert_almost_equal(np.array(T1(1.2)), T1_desired)\n\n d1 = opti.variable()\n T2 = ca.Function(\"T2\", [d1], [link2.get_link_relative_transform_casadi(d1)])\n T2_desired = DenHarMat(dh_params.theta, dh_params.alpha, dh_params.a, 0.75)\n assert_almost_equal(np.array(T2(0.75)), T2_desired)\n\n\nclass TestLink:\n def test_init(self):\n dh_params = DHLink(0.1, np.pi / 4, -0.1, np.pi / 6)\n geometry = Scene([Box(1, 2, 3)], [np.eye(4)])\n link1 = Link(dh_params, JointType.revolute, geometry)\n\n fig = plt.figure()\n ax = fig.gca(projection=\"3d\")\n link1.plot(ax, np.eye(4))\n"
] | [
[
"numpy.testing.assert_almost_equal",
"numpy.eye",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.array",
"numpy.sin"
]
] |
jeikabu/lumberyard | [
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115"
] | [
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/cuda/tests/cudadrv/test_deallocations.py"
] | [
"from __future__ import division\n\nfrom contextlib import contextmanager\n\nimport numpy as np\n\nfrom numba import cuda, config\nfrom numba.cuda.testing import unittest, skip_on_cudasim\nfrom numba.tests.support import captured_stderr\n\n\n@skip_on_cudasim('not supported on CUDASIM')\nclass TestDeallocation(unittest.TestCase):\n def test_max_pending_count(self):\n # get deallocation manager and flush it\n deallocs = cuda.current_context().deallocations\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n # deallocate to maximum count\n for i in range(config.CUDA_DEALLOCS_COUNT):\n cuda.to_device(np.arange(1))\n self.assertEqual(len(deallocs), i + 1)\n # one more to trigger .clear()\n cuda.to_device(np.arange(1))\n self.assertEqual(len(deallocs), 0)\n\n def test_max_pending_bytes(self):\n # get deallocation manager and flush it\n ctx = cuda.current_context()\n deallocs = ctx.deallocations\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n\n mi = ctx.get_memory_info()\n\n max_pending = 10**6 # 1MB\n old_ratio = config.CUDA_DEALLOCS_RATIO\n try:\n # change to a smaller ratio\n config.CUDA_DEALLOCS_RATIO = max_pending / mi.total\n self.assertEqual(deallocs._max_pending_bytes, max_pending)\n\n # deallocate half the max size\n cuda.to_device(np.ones(max_pending // 2, dtype=np.int8))\n self.assertEqual(len(deallocs), 1)\n\n # deallocate another remaining\n cuda.to_device(np.ones(max_pending - deallocs._size, dtype=np.int8))\n self.assertEqual(len(deallocs), 2)\n\n # another byte to trigger .clear()\n cuda.to_device(np.ones(1, dtype=np.int8))\n self.assertEqual(len(deallocs), 0)\n finally:\n # restore old ratio\n config.CUDA_DEALLOCS_RATIO = old_ratio\n\n\n@skip_on_cudasim(\"defer_cleanup has no effect in CUDASIM\")\nclass TestDeferCleanup(unittest.TestCase):\n def test_basic(self):\n harr = np.arange(5)\n darr1 = cuda.to_device(harr)\n deallocs = cuda.current_context().deallocations\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n with cuda.defer_cleanup():\n darr2 = cuda.to_device(harr)\n del darr1\n self.assertEqual(len(deallocs), 1)\n del darr2\n self.assertEqual(len(deallocs), 2)\n deallocs.clear()\n self.assertEqual(len(deallocs), 2)\n\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n\n def test_nested(self):\n harr = np.arange(5)\n darr1 = cuda.to_device(harr)\n deallocs = cuda.current_context().deallocations\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n with cuda.defer_cleanup():\n with cuda.defer_cleanup():\n darr2 = cuda.to_device(harr)\n del darr1\n self.assertEqual(len(deallocs), 1)\n del darr2\n self.assertEqual(len(deallocs), 2)\n deallocs.clear()\n self.assertEqual(len(deallocs), 2)\n deallocs.clear()\n self.assertEqual(len(deallocs), 2)\n\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n\n def test_exception(self):\n harr = np.arange(5)\n darr1 = cuda.to_device(harr)\n deallocs = cuda.current_context().deallocations\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n\n class CustomError(Exception):\n pass\n\n with self.assertRaises(CustomError):\n with cuda.defer_cleanup():\n darr2 = cuda.to_device(harr)\n del darr2\n self.assertEqual(len(deallocs), 1)\n deallocs.clear()\n self.assertEqual(len(deallocs), 1)\n raise CustomError\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n del darr1\n self.assertEqual(len(deallocs), 1)\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n\n\nclass TestDeferCleanupAvail(unittest.TestCase):\n def test_context_manager(self):\n # just make sure the API is available\n with 
cuda.defer_cleanup():\n pass\n\n\n@skip_on_cudasim('not supported on CUDASIM')\nclass TestDel(unittest.TestCase):\n \"\"\"\n Ensure resources are deleted properly without ignored exception.\n \"\"\"\n @contextmanager\n def check_ignored_exception(self, ctx):\n with captured_stderr() as cap:\n yield\n ctx.deallocations.clear()\n self.assertFalse(cap.getvalue())\n\n def test_stream(self):\n ctx = cuda.current_context()\n stream = ctx.create_stream()\n with self.check_ignored_exception(ctx):\n del stream\n\n def test_event(self):\n ctx = cuda.current_context()\n event = ctx.create_event()\n with self.check_ignored_exception(ctx):\n del event\n\n def test_pinned_memory(self):\n ctx = cuda.current_context()\n mem = ctx.memhostalloc(32)\n with self.check_ignored_exception(ctx):\n del mem\n\n def test_mapped_memory(self):\n ctx = cuda.current_context()\n mem = ctx.memhostalloc(32, mapped=True)\n with self.check_ignored_exception(ctx):\n del mem\n\n def test_device_memory(self):\n ctx = cuda.current_context()\n mem = ctx.memalloc(32)\n with self.check_ignored_exception(ctx):\n del mem\n\n\nif __name__ == '__main__':\n unittest.main()"
] | [
[
"numpy.arange",
"numpy.ones"
]
] |
basaks/uncover-ml | [
"167af7666845e2f0936aa4fc0e60abf8b1984219"
] | [
"scripts/intersect_rasters.py"
] | [
"from pathlib import Path\n\nimport numpy as np\nimport rasterio\nimport geopandas as gpd\nfrom joblib import Parallel, delayed\n\ndata_location = \\\n Path(\"/g/data/ge3/covariates/national_albers_filled_new/albers_cropped/\")\n# Read points from shapefile\n\nshapefile_location = Path(\"/g/data/ge3/aem_sections/AEM_covariates/\")\n\n# local\n# k = data_location.joinpath('data', 'LATITUDE_GRID1.tif')\n# shapefile_location = Path(\"configs/data\")\n# shp = shapefile_location.joinpath('geochem_sites.shp')\n\ngeotifs = {\n \"relief_radius4.tif\": \"relief4\",\n \"national_Wii_RF_multirandomforest_prediction.tif\": \"mrf_pred\",\n \"MvrtpLL_smooth.tif\": \"mrvtpLL_s\",\n \"MvrtpLL_fin.tif\": \"mvrtpLL_f\",\n \"LOC_distance_to_coast.tif\": \"LOC_dis\",\n \"Gravity_land.tif\": \"gravity\",\n \"dem_fill.tif\": \"dem\",\n \"Clim_Prescott_LindaGregory.tif\": \"clim_linda\",\n \"clim_PTA_albers.tif\": \"clim_alber\",\n \"SagaWET9cell_M.tif\": \"sagawet\",\n \"ceno_euc_aust1.tif\": \"ceno_euc\"\n}\n\n\ndownscale_factor = 2 # keep 1 point in a 2x2 cell\n\n\ndef intersect_and_sample_shp(shp: Path):\n print(\"====================================\\n\", f\"intersecting {shp.as_posix()}\")\n pts = gpd.read_file(shp)\n coords = np.array([(p.x, p.y) for p in pts.geometry])\n tif_name = list(geotifs.keys())[0]\n tif = data_location.joinpath(tif_name)\n orig_cols = pts.columns\n with rasterio.open(tif) as src:\n # resample data to target shape\n data = src.read(\n out_shape=(\n src.count,\n int(src.height / downscale_factor),\n int(src.width / downscale_factor)\n ),\n resampling=rasterio.enums.Resampling.bilinear\n )\n # scale image transform\n transform = src.transform * src.transform.scale(\n (src.width / data.shape[-1]),\n (src.height / data.shape[-2])\n )\n pts[\"rows\"], pts[\"cols\"] = rasterio.transform.rowcol(transform, coords[:, 0], coords[:, 1])\n\n pts_deduped = pts.drop_duplicates(subset=['rows', 'cols'])[orig_cols]\n coords_deduped = np.array([(p.x, p.y) for p in pts_deduped.geometry])\n\n for k, v in geotifs.items():\n print(f\"adding {k} to output dataframe\")\n with rasterio.open(data_location.joinpath(k)) as src:\n pts_deduped[v] = [x[0] for x in src.sample(coords_deduped)]\n pts_deduped.to_file(Path('out').joinpath(shp.name))\n # pts.to_csv(Path(\"out\").joinpath(shp.stem + \".csv\"), index=False)\n\n\nrets = Parallel(\n n_jobs=-1,\n verbose=100,\n)(delayed(intersect_and_sample_shp)(s) for s in shapefile_location.glob(\"*.shp\"))\n\n"
] | [
[
"numpy.array"
]
] |
wnov/vega | [
"19256aca4d047bfad3b461f0a927e1c2abb9eb03"
] | [
"vega/core/pipeline/fully_train_pipe_step.py"
] | [
"# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# MIT License for more details.\n\n\"\"\"Fully Train PipeStep that used in Pipeline.\"\"\"\nimport os\nimport logging\nimport subprocess\nimport pickle\nimport vega\nfrom .pipe_step import PipeStep\nfrom zeus.common import ClassFactory, ClassType\nfrom zeus.common import FileOps, TaskOps\nfrom ..scheduler import create_master\nfrom zeus.common.general import General\nfrom zeus.report import Report, ReportRecord\nfrom vega.core.pipeline.conf import PipeStepConfig, PipelineConfig\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](ClassType.PIPE_STEP)\nclass FullyTrainPipeStep(PipeStep):\n \"\"\"FullyTrainPipeStep is the implementation class of PipeStep.\n\n Fully train is the last pipe step in pipeline, we provide horovrd or local trainer\n for user to choose.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n logger.info(\"init FullyTrainPipeStep...\")\n\n def do(self):\n \"\"\"Start to run fully train with horovod or local trainer.\"\"\"\n logger.info(\"FullyTrainPipeStep started...\")\n cls_trainer = ClassFactory.get_cls('trainer', \"Trainer\")\n if cls_trainer.config.distributed:\n self._do_distributed_fully_train()\n else:\n records = self._get_current_step_records()\n logger.debug(\"load pipestep records: {}\".format(records))\n self.master = create_master(update_func=Report().update_report)\n self._train_multi_models(records)\n Report().output_step_all_records(\n step_name=self.task.step_name, weights_file=True, performance=True)\n self.master.close_client()\n Report().backup_output_path()\n\n def _get_current_step_records(self):\n step_name = self.task.step_name\n models_folder = PipeStepConfig.pipe_step.get(\"models_folder\")\n records = []\n cur_index = PipelineConfig.steps.index(step_name)\n if cur_index >= 1 or models_folder:\n # records = Report().get_pareto_front_records(PipelineConfig.steps[cur_index - 1])\n if not models_folder:\n models_folder = FileOps.join_path(\n TaskOps().local_output_path, PipelineConfig.steps[cur_index - 1])\n models_folder = models_folder.replace(\n \"{local_base_path}\", TaskOps().local_base_path)\n records = Report().load_records_from_model_folder(models_folder)\n else:\n records = [ReportRecord(step_name, 0)]\n logging.debug(\"Records: {}\".format(records))\n for record in records:\n record.step_name = step_name\n return records\n\n def _train_single_model(self, model_desc=None, model_id=None):\n cls_trainer = ClassFactory.get_cls('trainer', \"Trainer\")\n step_name = self.task.step_name\n if model_desc is not None:\n sample = dict(worker_id=model_id, desc=model_desc, step_name=step_name)\n record = ReportRecord().load_dict(sample)\n logging.debug(\"Broadcast Record=%s\", str(record))\n Report().broadcast(record)\n trainer = cls_trainer(model_desc=model_desc, id=model_id)\n else:\n trainer = cls_trainer(None, 0)\n # resume training\n if vega.is_torch_backend() and General._resume:\n trainer.load_checkpoint = True\n trainer._resume_training = True\n if cls_trainer.config.distributed:\n self._do_distributed_fully_train()\n else:\n self._do_single_fully_train(trainer)\n\n def _train_single_gpu_model(self, trainer):\n evaluator = 
self._get_evaluator(trainer.worker_id)\n self.master.run(trainer, evaluator)\n\n def _train_single_npu_model(self, trainer):\n temp_rank_file = os.environ.get('RANK_TABLE_FILE', None)\n temp_rank_size = os.environ['RANK_SIZE']\n os.environ.pop('RANK_TABLE_FILE', None)\n os.environ['RANK_SIZE'] = '1'\n evaluator = self._get_evaluator(trainer.worker_id)\n self.master.run(trainer, evaluator)\n if temp_rank_file is not None:\n os.environ['RANK_TABLE_FILE'] = temp_rank_file\n os.environ['RANK_SIZE'] = temp_rank_size\n\n def _do_single_fully_train(self, trainer):\n if os.environ['DEVICE_CATEGORY'] == 'GPU':\n self._train_single_gpu_model(trainer)\n elif os.environ['DEVICE_CATEGORY'] == 'NPU':\n self._train_single_npu_model(trainer)\n\n def _train_multi_models(self, records):\n for record in records:\n self._train_single_model(record.desc, record.worker_id)\n self.master.join()\n\n def _get_evaluator(self, worker_id):\n if not PipeStepConfig.evaluator_enable:\n return None\n cls_evaluator = ClassFactory.get_cls('evaluator', \"Evaluator\")\n evaluator = cls_evaluator({\"step_name\": self.task.step_name, \"worker_id\": worker_id})\n return evaluator\n\n def _do_horovod_fully_train(self):\n pwd_dir = os.path.dirname(os.path.abspath(__file__))\n cf_file = os.path.join(pwd_dir, 'cf.pickle')\n cf_content = {'registry': ClassFactory.__registry__,\n 'general_config': General().to_json(),\n 'pipe_step_config': PipeStepConfig().to_json()}\n with open(cf_file, 'wb') as f:\n pickle.dump(cf_content, f)\n cf_file_remote = os.path.join(self.task.local_base_path, 'cf.pickle')\n FileOps.copy_file(cf_file, cf_file_remote)\n if os.environ.get('DLS_TASK_NUMBER') is None:\n # local cluster\n worker_ips = '127.0.0.1'\n if General.cluster.master_ip is not None and General.cluster.master_ip != '127.0.0.1':\n worker_ips = General.cluster.master_ip\n for ip in General.cluster.slaves:\n worker_ips = worker_ips + ',' + ip\n cmd = ['bash', '{}/horovod/run_cluster_horovod_train.sh'.format(pwd_dir),\n str(self.world_device_size), cf_file_remote, worker_ips]\n else:\n # Roma\n cmd = ['bash', '{}/horovod/run_horovod_train.sh'.format(pwd_dir),\n str(self.world_device_size), cf_file_remote]\n proc = subprocess.Popen(cmd, env=os.environ)\n proc.wait()\n\n def _do_hccl_fully_train(self):\n origin_parallel_fully_train = General.parallel_fully_train\n General.parallel_fully_train = True\n General.dft = True\n cls_trainer = ClassFactory.get_cls('trainer', \"Trainer\")\n self.master = create_master()\n workers_num = int(os.environ['RANK_SIZE'])\n for i in range(workers_num):\n trainer = cls_trainer(None, id=i)\n evaluator = self._get_evaluator(trainer.worker_id)\n self.master.run(trainer, evaluator)\n self.master.join()\n self.master.shutdown()\n General.parallel_fully_train = origin_parallel_fully_train\n General.dft = False\n\n def _do_distributed_fully_train(self):\n if os.environ['DEVICE_CATEGORY'] == 'GPU':\n self._do_horovod_fully_train()\n elif os.environ['DEVICE_CATEGORY'] == 'NPU':\n self._do_hccl_fully_train()\n\n @property\n def world_device_size(self):\n \"\"\"World device size is world size * device count in each world.\"\"\"\n import torch\n world_size = General.env.world_size\n device_nums = torch.cuda.device_count()\n num_devices = world_size * device_nums\n return num_devices\n"
] | [
[
"torch.cuda.device_count"
]
] |
expoli/Learn-tensorflow | [
"cc6b30c233678cf8a6f5da97fdf02ff49e810e61"
] | [
"BEGINNER/ML_basics_with_Keras/Regression/Predict_fuel_efficiency.py"
] | [
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nprint(tf.__version__)\n\ndataset_path = keras.utils.get_file(\"auto-mpg.data\",\n \"http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data\")\ndataset_path\n\ncolumn_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',\n 'Acceleration', 'Model Year', 'Origin']\nraw_dataset = pd.read_csv(dataset_path, names=column_names,\n na_values=\"?\", comment='\\t',\n sep=\" \", skipinitialspace=True)\n\ndataset = raw_dataset.copy()\ndataset.tail()\n\ndataset.isna().sum()\n\ndataset = dataset.dropna()\n\norigin = dataset.pop('Origin')\n\ndataset['USA'] = (origin == 1) * 1.0\ndataset['Europe'] = (origin == 2) * 1.0\ndataset['Japan'] = (origin == 3) * 1.0\ndataset.tail()\n\ntrain_dataset = dataset.sample(frac=0.8, random_state=0)\ntest_dataset = dataset.drop(train_dataset.index)\n\nsns.pairplot(train_dataset[[\"MPG\", \"Cylinders\", \"Displacement\", \"Weight\"]], diag_kind=\"kde\")\n\ntrain_stats = train_dataset.describe()\ntrain_stats.pop(\"MPG\")\ntrain_stats = train_stats.transpose()\ntrain_stats\n\ntrain_labels = train_dataset.pop('MPG')\ntest_labels = test_dataset.pop('MPG')\n\n\ndef norm(x):\n return (x - train_stats['mean']) / train_stats['std']\n\n\nnormed_train_data = norm(train_dataset)\nnormed_test_data = norm(test_dataset)\n\n\ndef build_model():\n model = keras.Sequential([\n layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),\n layers.Dense(64, activation='relu'),\n layers.Dense(1)\n ])\n\n optimizer = tf.keras.optimizers.RMSprop(0.001)\n\n model.compile(loss='mse',\n optimizer=optimizer,\n metrics=['mae', 'mse'])\n return model\n\n\nmodel = build_model()\n\nmodel.summary()\n\nexample_batch = normed_train_data[:10]\nexample_result = model.predict(example_batch)\nexample_result\n\n\n# 通过为每个完成的时期打印一个点来显示训练进度\nclass PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\n\nEPOCHS = 1000\n\nhistory = model.fit(\n normed_train_data, train_labels,\n epochs=EPOCHS, validation_split=0.2, verbose=0,\n callbacks=[PrintDot()])\n\nhist = pd.DataFrame(history.history)\nhist['epoch'] = history.epoch\nhist.tail()\n\n\ndef plot_history(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [MPG]')\n plt.plot(hist['epoch'], hist['mae'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mae'],\n label='Val Error')\n plt.ylim([0, 5])\n plt.legend()\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$MPG^2$]')\n plt.plot(hist['epoch'], hist['mse'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mse'],\n label='Val Error')\n plt.ylim([0, 20])\n plt.legend()\n plt.show()\n\n\nplot_history(history)\n\nmodel = build_model()\n\n# patience 值用来检查改进 epochs 的数量\nearly_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n\nhistory = model.fit(normed_train_data, train_labels, epochs=EPOCHS,\n validation_split=0.2, verbose=0, callbacks=[early_stop, PrintDot()])\n\nplot_history(history)\n\nloss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2)\n\nprint(\"Testing set Mean Abs Error: {:5.2f} MPG\".format(mae))\n\ntest_predictions = 
model.predict(normed_test_data).flatten()\n\nplt.scatter(test_labels, test_predictions)\nplt.xlabel('True Values [MPG]')\nplt.ylabel('Predictions [MPG]')\nplt.axis('equal')\nplt.axis('square')\nplt.xlim([0, plt.xlim()[1]])\nplt.ylim([0, plt.ylim()[1]])\n_ = plt.plot([-100, 100], [-100, 100])\n\nerror = test_predictions - test_labels\nplt.hist(error, bins=25)\nplt.xlabel(\"Prediction Error [MPG]\")\n_ = plt.ylabel(\"Count\")\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"tensorflow.keras.utils.get_file",
"matplotlib.pyplot.axis",
"pandas.DataFrame",
"tensorflow.keras.optimizers.RMSprop",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"tensorflow.keras.callbacks.EarlyStopping",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.hist",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter"
]
] |
M00mo/neuralpredictors | [
"0dd46f0bf03ec3fe53f6a796cbcbea09c4972932"
] | [
"neuralpredictors/data/datasets.py"
] | [
"import json\nfrom collections import namedtuple\nfrom datetime import datetime\nfrom pathlib import Path\nfrom zipfile import ZipFile\n\nimport h5py\nimport numpy as np\nfrom scipy.signal import convolve2d\nfrom torch.utils.data import Dataset\n\nfrom .exceptions import InconsistentDataException, DoesNotExistException\nfrom .transforms import (\n DataTransform,\n MovieTransform,\n StaticTransform,\n Invertible,\n Subsequence,\n Delay,\n)\nfrom .utils import convert_static_h5_dataset_to_folder, zip_dir\nfrom ..utils import recursively_load_dict_contents_from_group\n\n\nclass AttributeHandler:\n def __init__(self, name, h5_handle):\n \"\"\"\n Can be used to turn a dataset within a hdf5 dataset into an attribute.\n\n Args:\n name: name of the dataset in the hdf5 file\n h5_handle: file handle for the hdf5 file\n \"\"\"\n assert name in h5_handle, \"{} must be in {}\".format(name, h5_handle)\n self.name = name\n self.h5_handle = h5_handle\n\n def __getattr__(self, item):\n if item in self.h5_handle[self.name]:\n ret = self.h5_handle[self.name][item][()]\n if ret.dtype.char == \"S\": # convert bytes to unicode\n ret = ret.astype(str)\n return ret\n else:\n raise AttributeError(\"Attribute {} not found\".format(item))\n\n def __getitem__(self, item):\n return getattr(self, item)\n\n def keys(self):\n return self.h5_handle[self.name].keys()\n\n def __dir__(self):\n attrs = set(super().__dir__())\n return attrs.union(set(self.h5_handle[self.name].keys()))\n\n\nclass AttributeTransformer(AttributeHandler):\n def __init__(self, name, h5_handle, transforms, data_group):\n \"\"\"\n Allows for id_transform of transforms to be applied to the\n specified attribute. Otherwise behaves like an AttributeHandler\n\n Args:\n name: see AttributeHandler\n h5_handle: see AttributeHandler\n transforms: the set of transforms that's supposed to be applied\n data_group: the data_key of the dataset that this attribute represents\n \"\"\"\n super().__init__(name, h5_handle)\n self.transforms = transforms\n self.data_group = data_group\n\n def __getattr__(self, item):\n ret = {self.data_group: super().__getattr__(item)}\n for tr in self.transforms:\n if hasattr(tr, \"id_transform\"):\n ret = tr.id_transform(ret)\n\n return ret[self.data_group]\n\n\nclass TransformDataset(Dataset):\n def __init__(self, transforms=None):\n \"\"\"\n Abstract Class for Datasets with transformations, providing `transform` and `invert` functions\n to apply data transformation on the elements.\n\n Args:\n transforms: list of transforms to be applied to each data point\n \"\"\"\n self.transforms = transforms or []\n\n def transform(self, x, exclude=None):\n \"\"\"\n Apply transform on a data element from the dataset\n\n Args:\n x (tuple): a data element from the dataset\n exclude (Transform, optional): Type of data transformer to be excluded from transform list. 
Defaults to None.\n\n Returns:\n tuple: transformed data element\n \"\"\"\n\n for tr in self.transforms:\n if exclude is None or not isinstance(tr, exclude):\n x = tr(x)\n return x\n\n def invert(self, x, exclude=None):\n for tr in reversed(\n filter(lambda tr: not isinstance(tr, exclude), self.transforms)\n ):\n if not isinstance(tr, Invertible):\n raise TypeError(\"Cannot invert\", tr.__class__.__name__)\n else:\n x = tr.inv(x)\n return x\n\n def __iter__(self):\n yield from map(self.__getitem__, range(len(self)))\n\n def __repr__(self):\n return (\n \"{} m={}:\\n\\t({})\".format(\n self.__class__.__name__, len(self), \", \".join(self.data_groups)\n )\n + \"\\n\\t[Transforms: \"\n + \"->\".join([repr(tr) for tr in self.transforms])\n + \"]\"\n )\n\n\nclass H5SequenceSet(TransformDataset):\n def __init__(\n self,\n filename,\n *data_keys,\n output_rename=None,\n transforms=None,\n output_dict=False\n ):\n super().__init__(transforms=transforms)\n\n self.output_dict = output_dict\n\n if output_rename is None:\n output_rename = {}\n\n # a flag that can be changed to turn renaming on/off\n self.rename_output = True\n\n self.output_rename = output_rename\n\n self._fid = h5py.File(filename, \"r\")\n self.data = self._fid\n self.data_loaded = False\n\n # ensure that all elements of\n m = None\n for key in data_keys:\n assert key in self.data, \"Could not find {} in file\".format(key)\n l = len(self.data[key])\n if m is not None and l != m:\n raise ValueError(\"groups have different length\")\n m = l\n self._len = m\n\n # Specify which types of transforms are accepted\n self._transform_set = DataTransform\n\n self.data_keys = data_keys\n self.transforms = transforms or []\n\n self.data_point = namedtuple(\"DataPoint\", data_keys)\n self.output_point = namedtuple(\n \"OutputPoint\", [output_rename.get(k, k) for k in data_keys]\n )\n\n def load_content(self):\n self.data = recursively_load_dict_contents_from_group(self._fid)\n self.data_loaded = True\n\n def unload_content(self):\n self.data = self._fid\n self.data_loaded = False\n\n def __len__(self):\n return self._len\n\n def __getitem__(self, item):\n x = self.data_point(\n *(\n np.array(self.data[g][item if self.data_loaded else str(item)])\n for g in self.data_keys\n )\n )\n for tr in self.transforms:\n assert isinstance(tr, self._transform_set)\n x = tr(x)\n\n # convert to output point\n if self.rename_output:\n x = self.output_point(*x)\n\n if self.output_dict:\n x = x._asdict\n return x\n\n def __getattr__(self, item):\n if item in self.data:\n item = self.data[item]\n if isinstance(item, h5py.Dataset):\n dtype = item.dtype\n item = item[()]\n if dtype.char == \"S\": # convert bytes to unicode\n item = item.astype(str)\n return item\n return item\n else:\n # TODO: check for a proper way to handle cases where super doesn't have __getattr__\n return super().__getattr__(item)\n\n def __repr__(self):\n names = [\n \"{} -> {}\".format(k, self.output_rename[k])\n if k in self.output_rename\n else k\n for k in self.data_keys\n ]\n s = \"{} m={}:\\n\\t({})\".format(\n self.__class__.__name__, len(self), \", \".join(names)\n )\n if self.transforms is not None:\n s += (\n \"\\n\\t[Transforms: \"\n + \"->\".join([repr(tr) for tr in self.transforms])\n + \"]\"\n )\n return s\n\n\nclass MovieSet(H5SequenceSet):\n \"\"\"\n Extension to H5SequenceSet with specific HDF5 dataset assumed. 
Specifically,\n it assumes that properties such as `neurons` and `stats` are present in the dataset.\n \"\"\"\n\n def __init__(\n self,\n filename,\n *data_groups,\n output_rename=None,\n transforms=None,\n stats_source=\"all\"\n ):\n super().__init__(\n filename, *data_groups, output_rename=output_rename, transforms=transforms\n )\n self.stats_source = stats_source\n\n # set to accept only MovieTransform\n self._transform_set = MovieTransform\n\n @property\n def neurons(self):\n return AttributeTransformer(\n \"neurons\", self.data, self.transforms, data_group=\"responses\"\n )\n\n @property\n def n_neurons(self):\n return len(self.neurons.unit_ids)\n\n @property\n def input_shape(self):\n name = (\n self.output_rename.get(\"inputs\", \"inputs\")\n if self.rename_output\n else \"inputs\"\n )\n return (1,) + getattr(self[0], name).shape\n\n def transformed_mean(self, stats_source=None):\n if stats_source is None:\n stats_source = self.stats_source\n\n tmp = [\n np.atleast_1d(self.statistics[g][stats_source][\"mean\"][()])\n for g in self.data_keys\n ]\n x = self.transform(self.data_point(*tmp), exclude=(Subsequence, Delay))\n if self.rename_output:\n x = self.output_point(*x)\n return x\n\n def rf_base(self, stats_source=\"all\"):\n N, c, t, w, h = self.img_shape\n t = min(t, 150)\n mean = lambda dk: self.statistics[dk][stats_source][\"mean\"][()]\n d = dict(\n inputs=np.ones((1, c, t, w, h)) * np.array(mean(\"inputs\")),\n eye_position=np.ones((1, t, 1)) * mean(\"eye_position\")[None, None, :],\n behavior=np.ones((1, t, 1)) * mean(\"behavior\")[None, None, :],\n responses=np.ones((1, t, 1)) * mean(\"responses\")[None, None, :],\n )\n return self.transform(\n self.data_point(*[d[dk] for dk in self.data_keys]), exclude=Subsequence\n )\n\n def rf_noise_stim(self, m, t, stats_source=\"all\"):\n \"\"\"\n Generates a Gaussian white noise stimulus filtered with a 3x3 Gaussian filter\n for the computation of receptive fields. The mean and variance of the Gaussian\n noise are set to the mean and variance of the stimulus ensemble.\n\n The behvavior, eye movement statistics, and responses are set to their respective means.\n Args:\n m: number of noise samples\n t: length in time\n\n Returns: tuple of input, behavior, eye, and response\n\n \"\"\"\n N, c, _, w, h = self.img_shape\n stat = lambda dk, what: self.statistics[dk][stats_source][what][()]\n mu, s = stat(\"inputs\", \"mean\"), stat(\"inputs\", \"std\")\n h_filt = np.float64(\n [[1 / 16, 1 / 8, 1 / 16], [1 / 8, 1 / 4, 1 / 8], [1 / 16, 1 / 8, 1 / 16]]\n )\n noise_input = (\n np.stack(\n [\n convolve2d(np.random.randn(w, h), h_filt, mode=\"same\")\n for _ in range(m * t * c)\n ]\n ).reshape((m, c, t, w, h))\n * s\n + mu\n )\n\n mean_beh = np.ones((m, t, 1)) * stat(\"behavior\", \"mean\")[None, None, :]\n mean_eye = np.ones((m, t, 1)) * stat(\"eye_position\", \"mean\")[None, None, :]\n mean_resp = np.ones((m, t, 1)) * stat(\"responses\", \"mean\")[None, None, :]\n\n d = dict(\n inputs=noise_input.astype(np.float32),\n eye_position=mean_eye.astype(np.float32),\n behavior=mean_beh.astype(np.float32),\n responses=mean_resp.astype(np.float32),\n )\n\n return self.transform(\n self.data_point(*[d[dk] for dk in self.data_groups.values()]),\n exclude=(Subsequence, Delay),\n )\n\n\ndefault_datapoint = namedtuple(\"DefaultDataPoint\", [\"images\", \"responses\"])\n\n\nclass StaticSet(TransformDataset):\n def __init__(self, *data_keys, transforms=None):\n \"\"\"\n Abstract class for static datasets. 
Defines data_keys and a corresponding datapoint.\n \"\"\"\n super().__init__(transforms=transforms)\n\n self.data_keys = data_keys\n if set(data_keys) == {\"images\", \"responses\"}:\n # this version IS serializable in pickle\n self.data_point = default_datapoint\n else:\n # this version is NOT - you cannot use this with a dataloader with num_workers > 1\n self.data_point = namedtuple(\"DataPoint\", data_keys)\n\n\nclass H5ArraySet(StaticSet):\n def __init__(self, filename, *data_keys, transforms=None):\n \"\"\"\n Dataset for static data stored in hdf5 files.\n\n Args:\n filename: filename of the hdf5 file\n *data_keys: data keys to be read from the file\n transforms: list of transforms applied to each datapoint\n \"\"\"\n super().__init__(*data_keys, transforms=transforms)\n\n self._fid = h5py.File(filename, \"r\")\n self.data = self._fid\n self.data_loaded = False\n m = None\n for key in data_keys:\n assert key in self.data, \"Could not find {} in file\".format(key)\n if m is None:\n m = len(self.data[key])\n else:\n assert m == len(self.data[key]), \"Length of datasets do not match\"\n self._len = m\n\n def load_content(self):\n self.data = recursively_load_dict_contents_from_group(self._fid)\n self.data_loaded = True\n\n def unload_content(self):\n self.data = self._fid\n self.data_loaded = False\n\n def __getitem__(self, item):\n x = self.data_point(*(self.data[g][item] for g in self.data_keys))\n for tr in self.transforms:\n assert isinstance(tr, StaticTransform)\n x = tr(x)\n return x\n\n def __iter__(self):\n yield from map(self.__getitem__, range(len(self)))\n\n def __len__(self):\n return self._len\n\n def __repr__(self):\n return \"\\n\".join(\n [\n \"Tensor {}: {} \".format(key, self.data[key].shape)\n for key in self.data_keys\n ]\n + [\"Transforms: \" + repr(self.transforms)]\n )\n\n def __getattr__(self, item):\n if item in self.data:\n item = self.data[item]\n if isinstance(item, h5py.Dataset):\n dtype = item.dtype\n item = item[()]\n if dtype.char == \"S\": # convert bytes to univcode\n item = item.astype(str)\n return item\n return item\n else:\n raise AttributeError(\n \"Item {} not found in {}\".format(item, self.__class__.__name__)\n )\n\n\nclass StaticImageSet(H5ArraySet):\n def __init__(\n self, filename, *data_keys, transforms=None, cache_raw=False, stats_source=None\n ):\n \"\"\"\n Dataset for h5 files.\n\n Args:\n filename: filename of the hdf5 file\n *data_keys: datasets to be extracted\n transforms: transforms applied to each data point\n cache_raw: whether to cache the raw (untransformed) datapoints\n stats_source: statistic source to be used.\n \"\"\"\n super().__init__(filename, *data_keys, transforms=transforms)\n self.cache_raw = cache_raw\n self.last_raw = None\n self.stats_source = stats_source if stats_source is not None else \"all\"\n\n @property\n def n_neurons(self):\n return len(self[0].responses)\n\n @property\n def neurons(self):\n return AttributeTransformer(\n \"neurons\", self.data, self.transforms, data_group=\"responses\"\n )\n\n @property\n def info(self):\n return AttributeHandler(\"item_info\", self.data)\n\n @property\n def img_shape(self):\n return (1,) + self[0].images.shape\n\n def transformed_mean(self, stats_source=None):\n if stats_source is None:\n stats_source = self.stats_source\n\n tmp = [\n np.atleast_1d(self.statistics[dk][stats_source][\"mean\"][()])\n for dk in self.data_keys\n ]\n return self.transform(self.data_point(*tmp))\n\n def __repr__(self):\n return super().__repr__() + (\n \"\\n\\t[Stats source: 
{}]\".format(self.stats_source)\n if self.stats_source is not None\n else \"\"\n )\n\n def __dir__(self):\n attrs = set(self.__dict__).union(set(dir(type(self))))\n return attrs.union(set(self.data.keys()))\n\n\nclass DirectoryAttributeHandler:\n def __init__(self, path, links=None):\n \"\"\"\n Class that can be used to represent a subdirectory of a FileTree as a property in a FileTree dataset.\n Caches already loaded data items.\n\n Args:\n path: path to the subdiretory (pathlib.Path object)\n \"\"\"\n self.links = links or {}\n self.path = path\n\n def __getattr__(self, item):\n temp_path = self.resolve_item_path(item)\n if temp_path.exists() and temp_path.is_dir():\n val = DirectoryAttributeHandler(temp_path, links=self.links)\n else:\n val = np.load(self.path / \"{}.npy\".format(item))\n return val\n\n def resolve_item_path(self, item):\n if item in self.links:\n item = self.links[item]\n return self.path / item\n\n def __getitem__(self, item):\n return getattr(self, item)\n\n def keys(self):\n return [e.stem for e in self.path.glob(\"*\")]\n\n def __dir__(self):\n attrs = set(super().__dir__())\n return attrs.union(set(self.keys())).union(set(self.links.keys()))\n\n\nclass DirectoryAttributeTransformer(DirectoryAttributeHandler):\n def __init__(self, path, transforms, data_group, links=None):\n \"\"\"\n Class that can be used to represent a subdirectory of a FileTree as a property in a FileTree dataset.\n Like DirectoryAttributeHandler but allows for id_transform of transforms to be applied to the\n specified attribute.\n\n Args:\n path: path to the subdiretory (pathlib.Path object)\n \"\"\"\n\n super().__init__(path, links=links)\n self.transforms = transforms\n self.data_group = data_group\n\n def __getattr__(self, item):\n ret = {self.data_group: super().__getattr__(item)}\n for tr in self.transforms:\n ret = tr.id_transform(ret)\n return ret[self.data_group]\n\n\nclass FileTreeDataset(StaticSet):\n def __init__(self, dirname, *data_keys, transforms=None):\n \"\"\"\n Dataset stored as a file tree. The tree needs to have the subdirs data, meta, meta/neurons, meta/statistics,\n and meta/trials. Please refer to convert_static_h5_dataset_to_folder in neuralpredictors.data.utils\n how to export an hdf5 file into that structure.\n\n\n Here is an example. 
Data directories with too many entries have trials as .npy files\n named 0.npy, 1.npy, ...\n The meta/trials subdirectory must have single .npy files with arrays that provide additional trial based\n meta data.\n\n static22564-2-13-preproc0\n ├── data\n │ ├── behavior [5955 entries exceeds filelimit, not opening dir]\n │ ├── images [5955 entries exceeds filelimit, not opening dir]\n │ ├── pupil_center [5955 entries exceeds filelimit, not opening dir]\n │ └── responses [5955 entries exceeds filelimit, not opening dir]\n └── meta\n ├── neurons\n │ ├── animal_ids.npy\n │ ├── area.npy\n │ ├── layer.npy\n │ ├── scan_idx.npy\n │ ├── sessions.npy\n │ └── unit_ids.npy\n ├── statistics\n │ ├── behavior\n │ │ ├── all\n │ │ │ ├── max.npy\n │ │ │ ├── mean.npy\n │ │ │ ├── median.npy\n │ │ │ ├── min.npy\n │ │ │ └── std.npy\n │ │ └── stimulus_frame\n │ │ ├── max.npy\n │ │ ├── mean.npy\n │ │ ├── median.npy\n │ │ ├── min.npy\n │ │ └── std.npy\n │ ├── images\n │ │ ├── all\n │ │ │ ├── max.npy\n │ │ │ ├── mean.npy\n │ │ │ ├── median.npy\n │ │ │ ├── min.npy\n │ │ │ └── std.npy\n │ │ └── stimulus_frame\n │ │ ├── max.npy\n │ │ ├── mean.npy\n │ │ ├── median.npy\n │ │ ├── min.npy\n │ │ └── std.npy\n │ ├── pupil_center\n │ │ ├── all\n │ │ │ ├── max.npy\n │ │ │ ├── mean.npy\n │ │ │ ├── median.npy\n │ │ │ ├── min.npy\n │ │ │ └── std.npy\n │ │ └── stimulus_frame\n │ │ ├── max.npy\n │ │ ├── mean.npy\n │ │ ├── median.npy\n │ │ ├── min.npy\n │ │ └── std.npy\n │ └── responses\n │ ├── all\n │ │ ├── max.npy\n │ │ ├── mean.npy\n │ │ ├── median.npy\n │ │ ├── min.npy\n │ │ └── std.npy\n │ └── stimulus_frame\n │ ├── max.npy\n │ ├── mean.npy\n │ ├── median.npy\n │ ├── min.npy\n │ └── std.npy\n └── trials [12 entries exceeds filelimit, not opening dir]\n\n Args:\n dirname: root directory name\n *data_keys: data items to be extraced (must be subdirectories of root/data)\n transforms: transforms to be applied to the data (see TransformDataset)\n \"\"\"\n super().__init__(*data_keys, transforms=transforms)\n\n number_of_files = []\n\n if dirname.endswith(\".zip\"):\n if not Path(dirname[:-4]).exists():\n self.unzip(dirname, Path(dirname).absolute().parent)\n else:\n print(\n \"{} exists already. 
Not unpacking {}\".format(dirname[:-4], dirname)\n )\n\n dirname = dirname[:-4]\n\n self.basepath = Path(dirname).absolute()\n self._config_file = self.basepath / \"config.json\"\n\n if not self._config_file.exists():\n self._save_config(self._default_config)\n\n\n for data_key in data_keys:\n if data_key not in self.trial_info.keys():\n datapath = self.resolve_data_path(data_key)\n number_of_files.append(len(list(datapath.glob(\"*\"))))\n else:\n number_of_files.append(len(self.trial_info[data_key]))\n\n if not np.all(np.diff(number_of_files) == 0):\n raise InconsistentDataException(\"Number of data points is not equal\")\n else:\n self._len = number_of_files[0]\n\n self._cache = {data_key: {} for data_key in data_keys}\n\n _default_config = {\"links\": {}}\n\n def resolve_data_path(self, data_key):\n if self.link_exists(data_key):\n data_key = self.config[\"links\"][data_key]\n datapath = self.basepath / \"data\" / data_key\n\n if not datapath.exists():\n raise DoesNotExistException(\"Data path {} does not exist\".format(datapath))\n return datapath\n\n def link_exists(self, link):\n return \"links\" in self.config and link in self.config[\"links\"]\n\n @property\n def config(self):\n with open(self._config_file) as fid:\n return json.load(fid)\n\n def _save_config(self, cfg):\n with open(self._config_file, \"w\") as fid:\n return json.dump(cfg, fid)\n\n def __len__(self):\n return self._len\n\n def __getitem__(self, item):\n # load data from cache or disk\n ret = []\n for data_key in self.data_keys:\n if item in self._cache[data_key]:\n ret.append(self._cache[data_key][item])\n else:\n if data_key in self.trial_info.keys():\n val = self.trial_info[data_key][item:item+1]\n else:\n datapath = self.resolve_data_path(data_key)\n val = np.load(datapath / \"{}.npy\".format(item))\n self._cache[data_key][item] = val\n ret.append(val)\n\n # create data point and transform\n x = self.data_point(*ret)\n for tr in self.transforms:\n assert isinstance(tr, StaticTransform)\n x = tr(x)\n return x\n\n def add_log_entry(self, msg):\n timestamp = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S.%f)\")\n with open(self.basepath / \"change.log\", \"a+\") as fid:\n fid.write(\"{}: {}\\n\".format(timestamp, msg))\n\n @staticmethod\n def match_order(target, permuted, not_exist_ok=False):\n \"\"\"\n Matches the order or rows in permuted to by returning an index array such that.\n\n Args:\n not_exist_ok: if the element does not exist, don't return an index\n\n Returns: index array `idx` such that `target == permuted[idx, :]`\n \"\"\"\n\n order, target_idx = [], []\n unmatched_counter = 0\n for i, row in enumerate(target):\n idx = np.sum(permuted - row, axis=1) == 0\n if not not_exist_ok:\n assert idx.sum() == 1\n if idx.sum() == 1:\n order.append(np.where(idx)[0][0])\n target_idx.append(i)\n else:\n unmatched_counter += 1\n if not_exist_ok:\n print(\"Encountered {} unmatched elements\".format(unmatched_counter))\n return np.array(target_idx, dtype=int), np.array(order, dtype=int)\n\n def add_neuron_meta(\n self, name, animal_id, session, scan_idx, unit_id, values, fill_missing=None\n ):\n \"\"\"\n Add new meta information about neurons.\n\n Args:\n name: name of the new meta information\n animal_id: array with animal_ids per first dimension of values\n session: array with session per first dimension of values\n scan_idx: array with scan_idx per first dimension of values\n unit_id: array with unit_id per first dimension of values\n values: new meta information. 
First dimension must refer to neurons.\n fill_missing: fill the values of the new attribute with NaN if not provided\n \"\"\"\n if (\n not len(animal_id)\n == len(session)\n == len(scan_idx)\n == len(unit_id)\n == len(values)\n ):\n raise InconsistentDataException(\n \"number of trials and identifiers not consistent\"\n )\n\n target = np.c_[\n (\n self.neurons.animal_ids,\n self.neurons.sessions,\n self.neurons.scan_idx,\n self.neurons.unit_ids,\n )\n ]\n permuted = np.c_[(animal_id, session, scan_idx, unit_id)]\n vals = np.ones((len(target),) + values.shape[1:], dtype=values.dtype) * (\n np.nan if fill_missing is None else fill_missing\n )\n tidx, idx = self.match_order(\n target, permuted, not_exist_ok=fill_missing is not None\n )\n\n assert (\n np.sum(target[tidx] - permuted[idx, ...]) == 0\n ), \"Something went wrong in sorting\"\n\n vals[tidx, ...] = values[idx, ...]\n np.save(self.basepath / \"meta/neurons/{}.npy\".format(name), vals)\n self.add_log_entry(\n \"Added new neuron meta attribute {} to meta/neurons\".format(name)\n )\n\n @staticmethod\n def initialize_from(filename, outpath=None, overwrite=False, ignore_all_behaviors=False):\n \"\"\"\n Convenience function. See `convert_static_h5_dataset_to_folder` in `.utils`\n \"\"\"\n convert_static_h5_dataset_to_folder(\n filename, outpath=outpath, overwrite=overwrite, ignore_all_behaviors=ignore_all_behaviors\n )\n\n @property\n def change_log(self):\n if (self.basepath / \"change.log\").exists():\n with open(self.basepath / \"change.log\", \"r\") as fid:\n print(\"\".join(fid.readlines()))\n\n def zip(self, filename=None):\n \"\"\"\n Zips current dataset.\n\n Args:\n filename: Filename for the zip. Directory name + zip by default.\n \"\"\"\n\n if filename is None:\n filename = str(self.basepath) + \".zip\"\n zip_dir(filename, self.basepath)\n\n def unzip(self, filename, path):\n print(\"Unzipping {} into {}\".format(filename, path))\n with ZipFile(filename, \"r\") as zip_obj:\n zip_obj.extractall(path)\n\n def add_link(self, attr, new_name):\n \"\"\"\n Add a new dataset that links to an existing dataset.\n\n For instance `targets` that links to `responses`\n\n Args:\n attr: existing attribute such as `responses`\n new_name: name of the new attribute reference.\n \"\"\"\n if not (self.basepath / \"data/{}\".format(attr)).exists():\n raise DoesNotExistException(\"Link target does not exist\")\n\n if (self.basepath / \"data/{}\".format(new_name)).exists():\n raise FileExistsError(\"Link target already exists\")\n\n config = self.config\n if not \"links\" in config:\n config[\"links\"] = {}\n config[\"links\"][new_name] = attr\n self._save_config(config)\n\n @property\n def n_neurons(self):\n return len(self[0].responses)\n\n @property\n def neurons(self):\n return DirectoryAttributeTransformer(\n self.basepath / \"meta/neurons\",\n self.transforms,\n data_group=\"responses\" if \"responses\" in self.data_keys else \"targets\",\n )\n\n @property\n def trial_info(self):\n return DirectoryAttributeHandler(self.basepath / \"meta/trials\")\n\n @property\n def statistics(self):\n return DirectoryAttributeHandler(\n self.basepath / \"meta/statistics\", self.config[\"links\"]\n )\n\n @property\n def img_shape(self):\n return (1,) + self[0].images.shape\n\n def __repr__(self):\n return \"{} {} (n={} items)\\n\\t{}\".format(\n self.__class__.__name__, self.basepath, self._len, \", \".join(self.data_keys)\n )\n"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.diff",
"numpy.random.randn",
"numpy.atleast_1d",
"numpy.array",
"numpy.where",
"numpy.float64"
]
] |
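The rf_noise_stim method in the row above builds its receptive-field stimulus by low-pass filtering Gaussian white noise with a 3x3 binomial kernel and rescaling it to the stimulus ensemble's mean and standard deviation. A minimal standalone sketch of that idea, assuming scipy is available; the helper name smoothed_noise is hypothetical and not part of the row:

import numpy as np
from scipy.signal import convolve2d

def smoothed_noise(m, t, c, w, h, mu, sigma, seed=0):
    # Filtered Gaussian noise frames, shaped (m, c, t, w, h) like rf_noise_stim.
    rng = np.random.default_rng(seed)
    # Same weights as h_filt above: a 3x3 binomial (approximately Gaussian) kernel.
    kernel = np.array([[1., 2., 1.], [2., 4., 2.], [1., 2., 1.]]) / 16.0
    frames = [
        convolve2d(rng.standard_normal((w, h)), kernel, mode="same")
        for _ in range(m * c * t)
    ]
    return np.stack(frames).reshape(m, c, t, w, h) * sigma + mu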
iserh/data-augmentation | [
"1e1e99177ff4256c68cafe043bd7e50d52bf669d"
] | [
"src/vae/models/architectures/model_v3.py"
] | [
"\"\"\"Variational autoencoder module class.\"\"\"\nfrom typing import Tuple\n\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom utils import init_weights\nfrom vae.models.base import Decoder, Encoder, VAEConfig, VAEModel\n\n\nclass _Encoder(Encoder):\n def __init__(self, z_dim: int, num_features: int) -> None:\n super(_Encoder, self).__init__()\n self.linear_stage = nn.Sequential(\n nn.Linear(num_features, 512),\n nn.ReLU(inplace=True),\n nn.Linear(512, 256),\n nn.ReLU(inplace=True),\n nn.Linear(256, 128),\n nn.ReLU(inplace=True),\n nn.Linear(128, 64),\n nn.ReLU(inplace=True),\n )\n # Encoder mean\n self.mean = nn.Linear(64, z_dim)\n # Encoder Variance log\n self.variance_log = nn.Linear(64, z_dim)\n\n # initialize weights\n self.linear_stage.apply(init_weights)\n self.mean.apply(init_weights)\n self.variance_log.apply(init_weights)\n\n def forward(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n x = self.linear_stage(x)\n return self.mean(x), self.variance_log(x)\n\n\nclass _Decoder(Decoder):\n def __init__(self, z_dim: int, num_features: int) -> None:\n super(_Decoder, self).__init__()\n self.linear_stage = nn.Sequential(\n nn.Linear(z_dim, 64),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(64, 128),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(128, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, num_features),\n nn.Sigmoid(),\n )\n\n # initialize weights\n self.linear_stage.apply(init_weights)\n\n def forward(self, x: Tensor) -> Tensor:\n return self.linear_stage(x)\n\n\nclass VAEModelV3(VAEModel):\n def __init__(self, config: VAEConfig) -> None:\n super().__init__(config)\n self.encoder = _Encoder(config.z_dim, num_features=8)\n self.decoder = _Decoder(config.z_dim, num_features=8)\n\n\ndef _get_model_constructor() -> VAEModelV3:\n return VAEModelV3\n"
] | [
[
"torch.nn.ReLU",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.nn.LeakyReLU"
]
] |
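The _Encoder in the row above returns a (mean, variance_log) pair and the _Decoder consumes a latent z, but the sampling step between them lives in the VAEModel base class, which the row does not show. A sketch of the standard reparameterization trick such a base class typically applies, stated under that assumption:

import torch

def reparameterize(mean: torch.Tensor, variance_log: torch.Tensor) -> torch.Tensor:
    # z = mean + eps * std with eps ~ N(0, I), keeping the sampling differentiable.
    std = torch.exp(0.5 * variance_log)
    eps = torch.randn_like(std)
    return mean + eps * std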
joshp112358/Cirq | [
"c4fac27a9849e589ee05b4f702f2d7c9049aaeea"
] | [
"cirq/ops/controlled_operation.py"
] | [
"# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import (\n cast,\n Any,\n Collection,\n Optional,\n Sequence,\n Tuple,\n Union,\n TYPE_CHECKING,\n)\n\nimport itertools\nimport numpy as np\n\nfrom cirq import protocols, linalg, value\nfrom cirq.ops import raw_types, gate_operation, controlled_gate\nfrom cirq.type_workarounds import NotImplementedType\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass ControlledOperation(raw_types.Operation):\n \"\"\"Augments existing operations to have one or more control qubits.\n\n This object is typically created via `operation.controlled_by(*qubits)`.\n \"\"\"\n\n def __init__(self,\n controls: Sequence[raw_types.Qid],\n sub_operation: 'cirq.Operation',\n control_values: Optional[Sequence[\n Union[int, Collection[int]]]] = None):\n if control_values is None:\n control_values = ((1,),) * len(controls)\n if len(control_values) != len(controls):\n raise ValueError('len(control_values) != len(controls)')\n # Convert to sorted tuples\n self.control_values = cast(\n Tuple[Tuple[int, ...], ...],\n tuple((val,) if isinstance(val, int) else tuple(sorted(val))\n for val in control_values))\n # Verify control values not out of bounds\n for q, val in zip(controls, self.control_values):\n if not all(0 <= v < q.dimension for v in val):\n raise ValueError(\n 'Control values <{!r}> outside of range for qubit '\n '<{!r}>.'.format(val, q))\n\n if not isinstance(sub_operation, ControlledOperation):\n self.controls = tuple(controls)\n self.sub_operation = sub_operation\n else:\n # Auto-flatten nested controlled operations.\n self.controls = tuple(controls) + sub_operation.controls\n self.sub_operation = sub_operation.sub_operation\n self.control_values += sub_operation.control_values\n\n @property\n def gate(self) -> Optional['cirq.ControlledGate']:\n if self.sub_operation.gate is None:\n return None\n return controlled_gate.ControlledGate(\n self.sub_operation.gate,\n control_values=self.control_values,\n control_qid_shape=[q.dimension for q in self.controls])\n\n @property\n def qubits(self):\n return self.controls + self.sub_operation.qubits\n\n def with_qubits(self, *new_qubits):\n n = len(self.controls)\n return ControlledOperation(\n new_qubits[:n], self.sub_operation.with_qubits(*new_qubits[n:]),\n self.control_values)\n\n def _decompose_(self):\n result = protocols.decompose_once(self.sub_operation, NotImplemented)\n if result is NotImplemented:\n return NotImplemented\n\n return [\n ControlledOperation(self.controls, op, self.control_values)\n for op in result\n ]\n\n def _value_equality_values_(self):\n return (frozenset(zip(self.controls,\n self.control_values)), self.sub_operation)\n\n def _apply_unitary_(self, args: 'protocols.ApplyUnitaryArgs') -> np.ndarray:\n n = len(self.controls)\n sub_n = len(args.axes) - n\n sub_axes = args.axes[n:]\n for control_vals in itertools.product(*self.control_values):\n active = (..., *(slice(v, v + 1) for v in control_vals),\n *(slice(None),) * sub_n)\n 
target_view = args.target_tensor[active]\n buffer_view = args.available_buffer[active]\n result = protocols.apply_unitary(self.sub_operation,\n protocols.ApplyUnitaryArgs(\n target_view, buffer_view,\n sub_axes),\n default=NotImplemented)\n\n if result is NotImplemented:\n return NotImplemented\n\n if result is not target_view:\n # HACK: assume they didn't somehow escape the slice view and\n # edit the rest of target_tensor.\n target_view[...] = result\n\n return args.target_tensor\n\n def _has_unitary_(self) -> bool:\n return protocols.has_unitary(self.sub_operation)\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n sub_matrix = protocols.unitary(self.sub_operation, None)\n if sub_matrix is None:\n return NotImplemented\n qid_shape = protocols.qid_shape(self)\n sub_n = len(qid_shape) - len(self.controls)\n tensor = linalg.eye_tensor(qid_shape, dtype=sub_matrix.dtype)\n sub_tensor = sub_matrix.reshape(qid_shape[len(self.controls):] * 2)\n for control_vals in itertools.product(*self.control_values):\n active = (*(v for v in control_vals), *(slice(None),) * sub_n) * 2\n tensor[active] = sub_tensor\n return tensor.reshape((np.prod(qid_shape, dtype=int),) * 2)\n\n def __str__(self):\n if set(self.control_values) == {(1,)}:\n\n def get_prefix(control_vals):\n return 'C'\n else:\n\n def get_prefix(control_vals):\n return 'C{}'.format(''.join(map(str, sorted(control_vals))))\n\n prefix = ''.join(map(get_prefix, self.control_values))\n if isinstance(self.sub_operation, gate_operation.GateOperation):\n return '{}{}({})'.format(prefix, self.sub_operation.gate,\n ', '.join(map(str, self.qubits)))\n return '{}({}, {})'.format(prefix,\n ', '.join(str(q) for q in self.controls),\n str(self.sub_operation))\n\n def __repr__(self):\n return ('cirq.ControlledOperation(controls={!r}, sub_operation={!r}, '\n 'control_values={!r})'.format(self.controls, self.sub_operation,\n self.control_values))\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self.sub_operation)\n\n def _resolve_parameters_(self, resolver):\n new_sub_op = protocols.resolve_parameters(self.sub_operation, resolver)\n return ControlledOperation(self.controls, new_sub_op,\n self.control_values)\n\n def _trace_distance_bound_(self) -> Optional[float]:\n if self._is_parameterized_():\n return None\n u = protocols.unitary(self.sub_operation, default=None)\n if u is None:\n return NotImplemented\n angle_list = np.append(np.angle(np.linalg.eigvals(u)), 0)\n return protocols.trace_distance_from_angle_list(angle_list)\n\n def __pow__(self, exponent: Any) -> 'ControlledOperation':\n new_sub_op = protocols.pow(self.sub_operation,\n exponent,\n NotImplemented)\n if new_sub_op is NotImplemented:\n return NotImplemented\n return ControlledOperation(self.controls, new_sub_op,\n self.control_values)\n\n def _circuit_diagram_info_(self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> Optional['protocols.CircuitDiagramInfo']:\n n = len(self.controls)\n\n sub_args = protocols.CircuitDiagramInfoArgs(\n known_qubit_count=(args.known_qubit_count - n\n if args.known_qubit_count is not None else None),\n known_qubits=(args.known_qubits[n:]\n if args.known_qubits is not None else None),\n use_unicode_characters=args.use_unicode_characters,\n precision=args.precision,\n qubit_map=args.qubit_map)\n sub_info = protocols.circuit_diagram_info(self.sub_operation,\n sub_args,\n None)\n if sub_info is None:\n return NotImplemented\n\n def get_symbol(vals):\n if tuple(vals) == (1,):\n return '@'\n return '({})'.format(','.join(map(str, 
vals)))\n\n wire_symbols = (*(get_symbol(vals) for vals in self.control_values),\n *sub_info.wire_symbols)\n return protocols.CircuitDiagramInfo(\n wire_symbols=wire_symbols,\n exponent=sub_info.exponent,\n exponent_qubit_index=None if sub_info.exponent_qubit_index is None\n else sub_info.exponent_qubit_index + 1)\n\n def _json_dict_(self):\n return {\n 'cirq_type': self.__class__.__name__,\n 'controls': self.controls,\n 'control_values': self.control_values,\n 'sub_operation': self.sub_operation,\n }\n"
] | [
[
"numpy.linalg.eigvals",
"numpy.prod"
]
] |
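ControlledOperation._unitary_ in the row above works by writing the sub-operation's matrix into the blocks of an identity tensor selected by the active control values. A plain numpy sketch of that block substitution for the simplest case, one control qubit with control value 1, which reproduces the CNOT matrix:

import numpy as np

X = np.array([[0, 1], [1, 0]], dtype=complex)  # sub-operation unitary
U = np.eye(4, dtype=complex)                   # identity on control (+) target
U[2:, 2:] = X  # basis states |10>, |11>: control is 1, so X acts on the target
assert np.allclose(U, np.array([[1, 0, 0, 0],
                                [0, 1, 0, 0],
                                [0, 0, 0, 1],
                                [0, 0, 1, 0]]))  # standard CNOT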
anonymouslorem/library_identification_vulnerability_report | [
"3eb1916b25bcf885640ed19954377edf45f7498a"
] | [
"FastXML/fastxml/fastxml/fastxml.py"
] | [
"from builtins import range\r\nfrom builtins import object\r\nimport os\r\nimport json\r\nfrom collections import OrderedDict\r\n\r\nimport scipy.sparse as sp\r\n\r\nfrom .inferencer import IForest, LeafComputer, Blender, IForestBlender\r\n\r\nclass Inferencer(object):\r\n \"\"\"\r\n Loads up a model for inferencing\r\n \"\"\"\r\n def __init__(self, dname, gamma=30, blend=0.8, leaf_probs=False):\r\n with open(os.path.join(dname, 'settings'), 'rt') as f:\r\n self.__dict__.update(json.load(f))\r\n\r\n self.gamma = gamma\r\n self.blend = blend\r\n self.leaf_probs = leaf_probs\r\n\r\n forest = IForest(dname, self.n_trees, self.n_labels)\r\n if self.leaf_classifiers:\r\n lc = LeafComputer(dname)\r\n predictor = Blender(forest, lc)\r\n else:\r\n predictor = IForestBlender(forest)\r\n\r\n self.predictor = predictor\r\n\r\n def predict(self, X, fmt='sparse'):\r\n assert fmt in ('sparse', 'dict')\r\n s = []\r\n num = X.shape[0] if isinstance(X, sp.csr_matrix) else len(X)\r\n for i in range(num):\r\n Xi = X[i]\r\n mean = self.predictor.predict(Xi.data, Xi.indices, \r\n self.blend, self.gamma, self.leaf_probs)\r\n\r\n if fmt == 'sparse':\r\n s.append(mean)\r\n\r\n else:\r\n od = OrderedDict()\r\n for idx in reversed(mean.data.argsort()):\r\n od[mean.indices[idx]] = mean.data[idx]\r\n \r\n s.append(od)\r\n\r\n if fmt == 'sparse':\r\n return sp.vstack(s)\r\n\r\n return s\r\n\r\n"
] | [
[
"scipy.sparse.vstack"
]
] |
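Inferencer.predict in the row above either vstacks the per-sample sparse rows or, in 'dict' mode, converts each row into an OrderedDict keyed by label index with the highest scores first. A small sketch of that per-row conversion; the function name row_to_ordered_dict is hypothetical:

import numpy as np
import scipy.sparse as sp
from collections import OrderedDict

def row_to_ordered_dict(row: sp.csr_matrix, k=None) -> OrderedDict:
    # Highest score first, mirroring the reversed argsort in Inferencer.predict.
    order = np.argsort(row.data)[::-1]
    if k is not None:
        order = order[:k]
    return OrderedDict((int(row.indices[i]), float(row.data[i])) for i in order)

# e.g. row_to_ordered_dict(sp.csr_matrix([[0.0, 0.7, 0.0, 0.2]]))
# -> OrderedDict([(1, 0.7), (3, 0.2)])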
mahesh131998/voice-based-visual-acuity-test | [
"67bf5d2141ee6725c4c37fa3ae67d3cac9cf01bf"
] | [
"eye1.py"
] | [
"# from flask import Flask, render_template, Response, request, redirect, url_for\r\n# import tkinter as tk\r\n# import time\r\n# import random\r\n# import speech_recognition as sr\r\nimport pyttsx3 as engine\r\n# import threading\r\n# from bs4 import BeautifulSoup \r\n# import requests \r\n\r\nfrom flask import Flask, render_template, Response, request, redirect, url_for,flash, session\r\nimport tkinter as tk\r\nimport time\r\nimport random\r\nimport speech_recognition as sr\r\n# import pyttsx3\r\nimport threading\r\nfrom werkzeug.utils import secure_filename\r\nimport os\r\nfrom flask_session import Session\r\nimport numpy as np\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array\r\nfrom keras.models import Sequential, load_model\r\nimport time\r\nimport sys\r\nimport requests \r\nfrom bs4 import BeautifulSoup \r\nSESSION_TYPE = 'filesystem'\r\nsess = Session()\r\n\r\nglobal i\r\nglobal p\r\napp = Flask(__name__,static_url_path='/static')\r\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\r\n\r\n\r\n\r\[email protected](\"/\")\r\ndef index():\r\n return render_template('index.html')\r\n\r\n\r\[email protected](\"/eye/\", methods=['POST'])\r\ndef index1():\r\n def fun():\r\n window = tk.Tk()\r\n window.configure(background='white')\r\n window.state(\"zoomed\")\r\n canvas = tk.Canvas(window, bg=\"white\", width=980, height=580, highlightthickness=0)\r\n canvas.pack(fill=tk.BOTH, expand=True)\r\n canvas_scroll = tk.Scrollbar(canvas, command=canvas.yview)\r\n canvas_scroll.place(relx=1, rely=0, relheight=1, anchor=tk.NE)\r\n canvas.configure(yscrollcommand=canvas_scroll.set, scrollregion=())\r\n i=0\r\n wrong=0\r\n engine.speak(\"Before begning the test kindly keep 6 meter distance from the screen, we will now test the right eye, cover the left eye\")\r\n j=[152,130,108,87,65,43,33,21,15,9]\r\n for x in j:\r\n i=i+1\r\n # speech to text algorithm\r\n def speech():\r\n nonlocal wrong\r\n def top():\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n # read the audio data from the default microphone\r\n r.adjust_for_ambient_noise(source)\r\n print(\"speak now\") \r\n engine.speak(\"you may speak now\")\r\n print('hello world')\r\n # engine.runAndWait()\r\n audio_data = r.record(source, duration=3)\r\n print(\"Recognizing...\")\r\n try:\r\n # convert speech to text\r\n text = r.recognize_google(audio_data, language='en-GB')\r\n print(type(text))\r\n if(((text>='a' and text<= 'z') or (text>='A' and text<='Z')) and (len(text)==1)):\r\n print(len(text))\r\n print(text)\r\n return text \r\n \r\n else:\r\n engine.speak(\"sorry we could not recognise you said, say it clearly again\")\r\n # engine.runAndWait()\r\n return top() \r\n \r\n except:\r\n engine.speak(\"sorry could not recognise ur voice, you will have to say that again\")\r\n # engine.runAndWait()\r\n print(\"sorry could not recognise ur voice\")\r\n return top()\r\n \r\n #Scomparison code\r\n for g in op:\r\n print(op)\r\n d= top()\r\n if d.isupper()== False:\r\n d= d.upper()\r\n print(d)\r\n if g != d:\r\n wrong = wrong+1\r\n \r\n\r\n if wrong !=0:\r\n print(\"wrong=\", wrong)\r\n canvas.destroy()\r\n window.destroy()\r\n break\r\n elif i==10 and wrong==0:\r\n canvas.destroy()\r\n window.destroy()\r\n break\r\n \r\n #this will call the screen display\r\n list = ['A','D','F','L','M','N','W','X']\r\n sampling = random.sample(list, k=5)\r\n op = sampling\r\n # here the random letters are generated\r\n\r\n def applytoLabel():\r\n n = len(op)\r\n element = ''\r\n for i in range(n):\r\n 
element = element + op[i] +\" \"\r\n return element\r\n m=x\r\n l9 = tk.Label(canvas, text=applytoLabel(),font= (\"Optician Sans\", m ,'bold'), bg=\"white\").grid(column=1, row=1, sticky='nsew',padx=85, pady=250)\r\n canvas.create_window(33,33, window=l9, anchor=tk.NW)\r\n window.after(1,window.update(),speech())\r\n \r\n window.mainloop()\r\n print(\"number of iterations\",i-1) \r\n va =[1.00,0.90,0.80,0.70,0.60,0.50,0.40,0.30,0.20,0.10]\r\n LogMAR = va[i-2] + 0.02 * (wrong)\r\n print(LogMAR,\"LogMAR Units\")\r\n righteye = LogMAR\r\n wrong =0\r\n return righteye\r\n\r\n \r\n\r\n def fun1(): \r\n window1 = tk.Tk()\r\n window1.configure(background='white')\r\n window1.state(\"zoomed\") \r\n canvas1 = tk.Canvas(window1, bg=\"white\", width=980, height=580, highlightthickness=0)\r\n canvas1.pack(fill=tk.BOTH, expand=True)\r\n canvas1_scroll = tk.Scrollbar(canvas1, command=canvas1.yview)\r\n canvas1_scroll.place(relx=1, rely=0, relheight=1, anchor=tk.NE)\r\n canvas1.configure(yscrollcommand=canvas1_scroll.set, scrollregion=())\r\n p=0\r\n wrong1=0\r\n engine.speak(\"Before begning the test kindly keep 6 meter distance from the screen, we will now test your left eye, cover the right eye\")\r\n j=[152,130,108,87,65,43,33,21,15,9]\r\n for x in j:\r\n p=p+1\r\n # speech to text algorithm\r\n def speech():\r\n nonlocal wrong1\r\n def top():\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n # read the audio data from the default microphone\r\n r.adjust_for_ambient_noise(source) \r\n print(\"speak now\") \r\n engine.speak(\"you may speak now\")\r\n # engine.runAndWait()\r\n audio_data = r.record(source, duration=3)\r\n print(\"Recognizing...\")\r\n try:\r\n # convert speech to text\r\n text = r.recognize_google(audio_data, language='en-GB')\r\n print(type(text))\r\n if(((text>='a' and text<= 'z') or (text>='A' and text<='Z')) and (len(text)==1)):\r\n print(len(text))\r\n # print(text, \"is an Alphabet)\r\n # print(type(text))\r\n print(text)\r\n return text \r\n \r\n else:\r\n engine.speak(\"sorry we could not recognise you said, say it clearly again\")\r\n # engine.runAndWait()\r\n # print(text)\r\n return top() \r\n \r\n except:\r\n engine.speak(\"sorry could not recognise ur voice, you will have to say that again\")\r\n # engine.runAndWait()\r\n print(\"sorry could not recognise ur voice\")\r\n return top()\r\n\r\n #Scomparison code\r\n for g in op:\r\n print(op)\r\n d= top()\r\n if d.isupper()== False:\r\n d= d.upper()\r\n print(d)\r\n if g != d:\r\n wrong1 = wrong1+1\r\n \r\n\r\n if wrong1 !=0:\r\n print(\"wrong=\", wrong1)\r\n canvas1.destroy()\r\n window1.destroy()\r\n break\r\n elif p==10 and wrong1==0:\r\n canvas1.destroy()\r\n window1.destroy()\r\n break\r\n \r\n #this will call the screen display\r\n list = ['A','D','F','L','M','N','W','X']\r\n sampling = random.sample(list, k=5)\r\n op = sampling\r\n # here the random letters are generated\r\n\r\n def applytoLabel():\r\n n = len(op)\r\n element = ''\r\n for i in range(n):\r\n element = element + op[i] +\" \"\r\n return element\r\n m=x\r\n l9 = tk.Label(canvas1, text=applytoLabel(),font= (\"Optician Sans\", m ,'bold'), bg=\"white\").grid(column=1, row=1, sticky='nsew',padx=85, pady=250)\r\n canvas1.create_window(33,33, window1=l9, anchor=tk.NW)\r\n window1.after(1,window1.update(),speech())\r\n \r\n window1.mainloop()\r\n print(\"number of iterations\",p-1) \r\n va =[1.00,0.90,0.80,0.70,0.60,0.50,0.40,0.30,0.20,0.10]\r\n LogMAR1 = va[p-2] + 0.02 * (wrong1)\r\n print(LogMAR1,\"LogMAR Units\")\r\n lefteye= LogMAR1\r\n 
wrong1=0\r\n return lefteye\r\n\r\n \r\n right=fun()\r\n \r\n left=fun1()\r\n \r\n \r\n \r\n return render_template('index.html',righteye = right, lefteye = left)\r\n\r\n\r\[email protected]('/webscraping', methods = ['GET', 'POST'])\r\ndef webscraping():\r\n disease_name=''\r\n if request.method == 'POST':\r\n diseasename = request.form[\"browser\"]\r\n print(diseasename)\r\n print(\"hi\")\r\n print(diseasename) \r\n disease = diseasename \r\n URL = \"https://www.nhs.uk/conditions/\"\r\n r = requests.get(URL) \r\n i=0 \r\n soup = BeautifulSoup(r.content, 'html5lib') \r\n\r\n names = []\r\n link = []\r\n\r\n for item in soup.findAll('a', {'class': 'nhsuk-list-panel__link'}):\r\n names.append(item.get_text(strip=True))\r\n\r\n for item in soup.findAll('li', attrs = {'class':'nhsuk-list-panel__item'}):\r\n link.append(item.a['href'] )\r\n\r\n for j in names:\r\n if j == disease :\r\n print(j)\r\n break\r\n else:\r\n i= i+1\r\n\r\n pandu ='https://www.nhs.uk/'+link[i]\r\n\r\n print(pandu)\r\n URL1 = pandu\r\n r1 = requests.get(URL1) \r\n soup = BeautifulSoup(r1.content, 'html5lib') \r\n table = soup.findAll('section') \r\n\r\n quotes = [] \r\n for row in table: \r\n na = row.get_text() \r\n quotes.append(na) \r\n\r\n pop= ''\r\n for fo in quotes:\r\n pop= pop + fo\r\n\r\n print(pop)\r\n return render_template('index.html', pop= pop)\r\n\r\n\r\napp.config['UPLOAD_FOLDER'] = 'C://Users//Mahesh//Desktop//new env//env//data//pogo'\r\[email protected]('/uploader', methods = ['GET', 'POST'])\r\ndef upload_file():\r\n if request.method == 'POST':\r\n f = request.files['file']\r\n filename = secure_filename(f.filename)\r\n f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\r\n flash('file uploaded successfully')\r\n status = 'file uploaded successfully'\r\n return render_template('index.html', status= status)\r\n\r\n\r\[email protected]('/finder', methods = ['GET', 'POST'])\r\ndef finderpic(): \r\n start = time.time()\r\n #Define Path\r\n model_path = './models/model.h5'\r\n model_weights_path = './models/weights.h5'\r\n test_path = 'data/pogo'\r\n\r\n #Load the pre-trained models\r\n model = load_model(model_path)\r\n model.load_weights(model_weights_path)\r\n\r\n #Define image parameters\r\n img_width, img_height = 150, 150\r\n\r\n #Prediction Function\r\n def predict(file):\r\n x = load_img(file, target_size=(img_width,img_height))\r\n x = img_to_array(x)\r\n x = np.expand_dims(x, axis=0)\r\n array = model.predict(x)\r\n result = array[0]\r\n #print(result)\r\n answer = np.argmax(result)\r\n if answer == 0:\r\n print(\"Predicted: cataract\")\r\n elif answer == 1:\r\n print(\"Predicted:conjunctivities \")\r\n elif answer == 2:\r\n print(\"Predicted: eyelid cyst\")\r\n elif answer == 3:\r\n print(\"Predicted: jaundise\")\r\n\r\n return answer\r\n\r\n #Walk the directory for every image\r\n for i, ret in enumerate(os.walk(test_path)):\r\n for i, filename in enumerate(ret[2]):\r\n if filename.startswith(\".\"):\r\n continue\r\n \r\n print(ret[0] + '/' + filename)\r\n result = predict(ret[0] + '/' + filename)\r\n print(result)\r\n if result == 0:\r\n predict=' cataract'\r\n elif result == 1:\r\n predict= 'conjunctivities' \r\n elif result == 2:\r\n predict= 'eyelid cyst'\r\n elif result == 3:\r\n predict= 'jaundise'\r\n\r\n print(predict)\r\n #Calculate execution time\r\n end = time.time()\r\n dur = end-start\r\n\r\n if dur<60:\r\n print(\"Execution Time:\",dur,\"seconds\")\r\n elif dur>60 and dur<3600:\r\n dur=dur/60\r\n print(\"Execution Time:\",dur,\"minutes\")\r\n else:\r\n 
dur=dur/(60*60)\r\n print(\"Execution Time:\",dur,\"hours\")\r\n\r\n folder_path = (r'C://Users//Mahesh//Desktop//new env//env//data//pogo')\r\n test = os.listdir(folder_path)\r\n for images in test:\r\n if images.endswith(('jpg','jpeg','png')):\r\n os.remove(os.path.join(folder_path, images))\r\n\r\n print(' images deleted')\r\n return render_template('index.html',predict = predict)\r\n \r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.secret_key = 'super secret key'\r\n app.config['SESSION_TYPE'] = 'filesystem'\r\n\r\n sess.init_app(app)\r\n app.run(debug=True)"
] | [
[
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.preprocessing.image.load_img",
"numpy.expand_dims",
"numpy.argmax"
]
] |
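Both eye-test routines in the row above reduce their result to the same scoring rule: a LogMAR base value per chart line plus a 0.02 penalty per missed letter. Isolated as a pure function, keeping the row's own `- 2` indexing exactly as written there (the name logmar_score is hypothetical):

def logmar_score(lines_attempted: int, mistakes: int) -> float:
    # Chart rows run from LogMAR 1.00 down to 0.10; each miss adds 0.02.
    va = [1.00, 0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.30, 0.20, 0.10]
    return va[lines_attempted - 2] + 0.02 * mistakes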
qilei123/AdelaiDet | [
"36f31670c2cc15b11b0367edee2b09d39e764c59"
] | [
"adet/modeling/postprocessing.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport torch\nfrom torch.nn import functional as F\n\nfrom detectron2.layers import paste_masks_in_image\nfrom adet.structures.instances import Instances\nfrom detectron2.utils.memory import retry_if_cuda_oom\n\n\ndef detector_postprocess(results, output_height, output_width, img_cls_pred, mask_threshold=0.5):\n \"\"\"\n Resize the output instances.\n The input images are often resized when entering an object detector.\n As a result, we often need the outputs of the detector in a different\n resolution from its inputs.\n\n This function will resize the raw outputs of an R-CNN detector\n to produce outputs according to the desired output resolution.\n\n Args:\n results (Instances): the raw outputs from the detector.\n `results.image_size` contains the input image resolution the detector sees.\n This object might be modified in-place.\n output_height, output_width: the desired output resolution.\n\n Returns:\n Instances: the resized output from the model, based on the output resolution\n \"\"\"\n\n # Converts integer tensors to float temporaries\n # to ensure true division is performed when\n # computing scale_x and scale_y.\n if isinstance(output_width, torch.Tensor):\n output_width_tmp = output_width.float()\n else:\n output_width_tmp = output_width\n\n if isinstance(output_height, torch.Tensor):\n output_height_tmp = output_height.float()\n else:\n output_height_tmp = output_height\n\n scale_x, scale_y = (\n output_width_tmp / results.image_size[1],\n output_height_tmp / results.image_size[0],\n )\n results = Instances((output_height, output_width), img_cls_pred, **results.get_fields())\n\n if results.has(\"pred_boxes\"):\n output_boxes = results.pred_boxes\n elif results.has(\"proposal_boxes\"):\n output_boxes = results.proposal_boxes\n\n output_boxes.scale(scale_x, scale_y)\n output_boxes.clip(results.image_size)\n\n results = results[output_boxes.nonempty()]\n\n if results.has(\"pred_masks\"):\n results.pred_masks = retry_if_cuda_oom(paste_masks_in_image)(\n results.pred_masks[:, 0, :, :], # N, 1, M, M\n results.pred_boxes,\n results.image_size,\n threshold=mask_threshold,\n )\n\n if results.has(\"pred_keypoints\"):\n results.pred_keypoints[:, :, 0] *= scale_x\n results.pred_keypoints[:, :, 1] *= scale_y\n\n return results\n\n\ndef sem_seg_postprocess(result, img_size, output_height, output_width):\n \"\"\"\n Return semantic segmentation predictions in the original resolution.\n\n The input images are often resized when entering semantic segmentor. Moreover, in same\n cases, they also padded inside segmentor to be divisible by maximum network stride.\n As a result, we often need the predictions of the segmentor in a different\n resolution from its inputs.\n\n Args:\n result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),\n where C is the number of classes, and H, W are the height and width of the prediction.\n img_size (tuple): image size that segmentor is taking as input.\n output_height, output_width: the desired output resolution.\n\n Returns:\n semantic segmentation prediction (Tensor): A tensor of the shape\n (C, output_height, output_width) that contains per-pixel soft predictions.\n \"\"\"\n result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)\n result = F.interpolate(\n result, size=(output_height, output_width), mode=\"bilinear\", align_corners=False\n )[0]\n return result\n"
] | [
[
"torch.nn.functional.interpolate"
]
] |
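detector_postprocess in the row above rescales predictions from the resolution the detector saw to the requested output resolution by multiplying box coordinates with per-axis scale factors and clipping to the image. A numpy-only sketch of that box transform, assuming (x1, y1, x2, y2) boxes; rescale_and_clip is a hypothetical name:

import numpy as np

def rescale_and_clip(boxes, in_hw, out_hw):
    # boxes: (N, 4) array of (x1, y1, x2, y2) in the detector's input resolution.
    scale = np.array([out_hw[1] / in_hw[1], out_hw[0] / in_hw[0]] * 2, dtype=float)
    out = np.asarray(boxes, dtype=float) * scale
    out[:, 0::2] = out[:, 0::2].clip(0, out_hw[1])  # x coordinates
    out[:, 1::2] = out[:, 1::2].clip(0, out_hw[0])  # y coordinates
    return out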
wangyidong3/detectron2 | [
"286e6877494353161a99fb26954ef0886ff2d219"
] | [
"tools/plain_train_net.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nDetectron2 training script with a plain training loop.\n\nThis scripts reads a given config file and runs the training or evaluation.\nIt is an entry point that is able to train standard models in detectron2.\n\nIn order to let one script support training of many models,\nthis script contains logic that are specific to these built-in models and therefore\nmay not be suitable for your own project.\nFor example, your research project perhaps only needs a single \"evaluator\".\n\nTherefore, we recommend you to use detectron2 as an library and take\nthis file as an example of how to use the library.\nYou may want to write your own script with your datasets and other customizations.\n\nCompared to \"train_net.py\", this script supports fewer default features.\nIt also includes fewer abstraction, therefore is easier to add custom logic.\n\"\"\"\n\nimport logging\nimport os\nfrom collections import OrderedDict\nimport torch\nfrom torch.nn.parallel import DistributedDataParallel\n\nimport detectron2.utils.comm as comm\nfrom detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer\nfrom detectron2.config import get_cfg\nfrom detectron2.data import (\n MetadataCatalog,\n build_detection_test_loader,\n build_detection_train_loader,\n)\nfrom detectron2.engine import default_argument_parser, default_setup, launch\nfrom detectron2.evaluation import (\n CityscapesEvaluator,\n COCOEvaluator,\n COCOPanopticEvaluator,\n DatasetEvaluators,\n LVISEvaluator,\n PascalVOCDetectionEvaluator,\n SemSegEvaluator,\n inference_on_dataset,\n print_csv_format,\n)\nfrom detectron2.modeling import build_model\nfrom detectron2.solver import build_lr_scheduler, build_optimizer\nfrom detectron2.utils.events import (\n CommonMetricPrinter,\n EventStorage,\n JSONWriter,\n TensorboardXWriter,\n)\n\nlogger = logging.getLogger(\"detectron2\")\n\n\ndef get_evaluator(cfg, dataset_name, output_folder=None):\n \"\"\"\n Create evaluator(s) for a given dataset.\n This uses the special metadata \"evaluator_type\" associated with each builtin dataset.\n For your own dataset, you can simply create an evaluator manually in your\n script and do not have to worry about the hacky if-else logic here.\n \"\"\"\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesEvaluator(dataset_name)\n if evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n if evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(dataset_name, 
evaluator_type)\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)\n\n\ndef do_test(cfg, model):\n results = OrderedDict()\n for dataset_name in cfg.DATASETS.TEST:\n data_loader = build_detection_test_loader(cfg, dataset_name)\n evaluator = get_evaluator(\n cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, \"inference\", dataset_name)\n )\n results_i = inference_on_dataset(model, data_loader, evaluator)\n results[dataset_name] = results_i\n if comm.is_main_process():\n logger.info(\"Evaluation results for {} in csv format:\".format(dataset_name))\n print_csv_format(results_i)\n if len(results) == 1:\n results = list(results.values())[0]\n return results\n\n\ndef do_train(cfg, model, resume=False):\n model.train()\n optimizer = build_optimizer(cfg, model)\n scheduler = build_lr_scheduler(cfg, optimizer)\n\n checkpointer = DetectionCheckpointer(\n model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler\n )\n start_iter = (\n checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get(\"iteration\", -1) + 1\n )\n max_iter = cfg.SOLVER.MAX_ITER\n\n periodic_checkpointer = PeriodicCheckpointer(\n checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter\n )\n\n writers = (\n [\n CommonMetricPrinter(max_iter),\n JSONWriter(os.path.join(cfg.OUTPUT_DIR, \"metrics.json\")),\n TensorboardXWriter(cfg.OUTPUT_DIR),\n ]\n if comm.is_main_process()\n else []\n )\n\n # compared to \"train_net.py\", we do not support accurate timing and\n # precise BN here, because they are not trivial to implement\n data_loader = build_detection_train_loader(cfg)\n logger.info(\"Starting training from iteration {}\".format(start_iter))\n with EventStorage(start_iter) as storage:\n for data, iteration in zip(data_loader, range(start_iter, max_iter)):\n iteration = iteration + 1\n storage.step()\n\n loss_dict = model(data)\n losses = sum(loss for loss in loss_dict.values())\n assert torch.isfinite(losses).all(), loss_dict\n\n loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}\n losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n if comm.is_main_process():\n storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)\n\n optimizer.zero_grad()\n losses.backward()\n optimizer.step()\n storage.put_scalar(\"lr\", optimizer.param_groups[0][\"lr\"], smoothing_hint=False)\n scheduler.step()\n\n if (\n cfg.TEST.EVAL_PERIOD > 0\n and iteration % cfg.TEST.EVAL_PERIOD == 0\n and iteration != max_iter\n ):\n do_test(cfg, model)\n # Compared to \"train_net.py\", the test results are not dumped to EventStorage\n comm.synchronize()\n\n if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):\n for writer in writers:\n writer.write()\n periodic_checkpointer.step(iteration)\n\n\ndef setup(args):\n \"\"\"\n Create configs and perform basic setups.\n \"\"\"\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(\n cfg, args\n ) # if you don't like any of the default setup, write your own setup code\n return cfg\n\n\ndef main(args):\n cfg = setup(args)\n\n model = build_model(cfg)\n logger.info(\"Model:\\n{}\".format(model))\n if args.eval_only:\n DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(\n cfg.MODEL.WEIGHTS, resume=args.resume\n )\n return do_test(cfg, model)\n\n distributed = comm.get_world_size() > 1\n if distributed:\n model = DistributedDataParallel(\n model, device_ids=[comm.get_local_rank()], 
broadcast_buffers=False\n )\n\n do_train(cfg, model)\n return do_test(cfg, model)\n\n\nif __name__ == \"__main__\":\n args = default_argument_parser().parse_args()\n print(\"Command Line Args:\", args)\n launch(\n main,\n args.num_gpus,\n num_machines=args.num_machines,\n machine_rank=args.machine_rank,\n dist_url=args.dist_url,\n args=(args,),\n )\n"
] | [
[
"torch.isfinite",
"torch.cuda.device_count"
]
] |
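The core of do_train in the row above is a conventional PyTorch loop: sum the per-task losses, guard against divergence, then step the optimizer and LR scheduler. Stripped of the distributed logging and checkpointing, one iteration reduces to roughly this sketch:

import torch

def train_step(model, data, optimizer, scheduler):
    loss_dict = model(data)                         # dict of per-task loss tensors
    losses = sum(loss_dict.values())
    assert torch.isfinite(losses).all(), loss_dict  # bail out on NaN/inf losses
    optimizer.zero_grad()
    losses.backward()
    optimizer.step()
    scheduler.step()
    return {k: v.item() for k, v in loss_dict.items()}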
potassco/xorro | [
"6ed499ac1608cf1d1d1b82b632d5961ee1bd8439"
] | [
"xorro/tests/gje_test.py"
] | [
"\"\"\"\nGauss-Jordan Tests Suite\n\"\"\"\nimport xorro\nfrom xorro import gje\nfrom xorro import gje_simplex as simplex\nimport numpy as np\n\ndef cols_state_to_matrix(state):\n ## Parse columns state to matrix\n return gje.columns_state_to_matrix(state)\n\ndef get_clause(m,lits):\n ## Deduce clause after GJE\n return gje.deduce_clause(m,lits)\n\ndef xor_columns(col,parity):\n ## XOR parity column with parity column\n return gje.xor_columns(col,parity)\n\ndef swap_row(m,i,j):\n ## Swap Rows m[i] with m[j]\n return gje.swap(m,i,j)\n\ndef xor_row(m,i,j):\n ## XOR Rows m[i] with m[j]\n return gje.xor(m,i,j)\n \ndef remove_rows_zeros(m):\n ## Remove rows with all zeros including the augmented column\n matrix = gje.remove_rows_zeros(m)\n return matrix\n\ndef check_sat(m):\n ## Check SAT\n return gje.check_sat(m)\n \ndef solve_gje(m, show):\n ## If there are more than unary xors perform GJE\n if len(m[0]) > 2:\n m = gje.remove_rows_zeros(m)\n m = gje.perform_gauss_jordan_elimination(m, show)\n return m\n\ndef solve_gje_(m, show):\n ## If there are more than unary xors perform GJE\n if len(m[0]) > 2:\n m = gje.remove_rows_zeros(m)\n m = np.array([np.array(xi) for xi in m])\n m = gje.perform_gauss_jordan_elimination_(m, show)\n return m\n \n\n\n\"\"\"\nGauss-Jordan Exclusive Tests\nParse the columns state to a binary matrix and return the list of literals\n\"\"\"\ndef test_columns_state_to_matrix(self):\n self.assertEqual(cols_state_to_matrix(\n {'parity': [0, 1, 1, 0, 0], 2: [1, 0, 0, 1, 0], 3: [0, 0, 0, 0, 1], 4: [1, 1, 0, 0, 0], 5: [0, 1, 0, 0, 0], 6: [1, 1, 0, 0, 0], 7: [0, 0, 1, 0, 1], 8: [0, 0, 1, 0, 0], 9: [0, 0, 0, 1, 0], 10: [0, 0, 0, 1, 0]}),\n ([[1,0,1,0,1,0,0,0,0,0],\n [0,0,1,1,1,0,0,0,0,1],\n [0,0,0,0,0,1,1,0,0,1],\n [1,0,0,0,0,0,0,1,1,0],\n [0,1,0,0,0,1,0,0,0,0]],[2,3,4,5,6,7,8,9,10]))\n\n \n\"\"\"\nDeduce clause after Gauss-Jordan Elimination\n\"\"\"\ndef test_get_clauses(self):\n self.assertEqual(get_clause([[1, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]], [2,3,4]), [-2])\n\n self.assertEqual(get_clause([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 1]], [2,3,4]), [-2,-3,4])\n\n self.assertEqual(get_clause([[1, 0, 1, 1, 0, 1],\n [0, 1, 1, 0, 0, 0],\n [1, 0, 1, 1, 1, 0]], [2,3,4,5,6]), [])\n\n\"\"\"\nXOR a single column with Parity column Tests\n\"\"\"\ndef test_xor_columns(self):\n self.assertEqual(xor_columns([1, 0],[1, 0]),[0, 0])\n\n self.assertEqual(xor_columns([0, 0, 0, 0, 0],[1, 1, 1, 1, 1]),[1, 1, 1, 1, 1])\n\n self.assertEqual(xor_columns([0, 1, 0, 1],[1, 0, 1, 0]),[1, 1, 1, 1])\n\n \n\"\"\"\nSwap Rows Tests\n\"\"\"\ndef test_swap_rows(self):\n self.assertEqual(swap_row([[1, 0, 1, 1, 1, 1],\n [1, 1, 0, 1, 0, 1],\n [1, 0, 0, 0, 0, 1]], 1, 2),[[1, 0, 1, 1, 1, 1],\n [1, 0, 0, 0, 0, 1],\n [1, 1, 0, 1, 0, 1]])\n\n self.assertEqual(swap_row([[0, 0],\n [1, 1]], 1, 0),[[1, 1],\n [0, 0]])\n\n self.assertEqual(swap_row([[0, 1],\n [1, 0]], 1, 0),[[1, 0],\n [0, 1]])\n\n\"\"\"\nXOR Rows Tests\n\"\"\"\ndef test_xor_rows(self):\n self.assertEqual(xor_row([[1, 0],\n [1, 1],\n [1, 0]], 0, 1),[[1, 0],\n [0, 1],\n [1, 0]])\n\n self.assertEqual(xor_row([[0, 0],\n [1, 1]], 1, 0),[[1, 1],\n [1, 1]])\n\n self.assertEqual(xor_row([[0, 0],\n [0, 0]], 1, 0),[[0, 0],\n [0, 0]])\n\n\"\"\" \nPre GJE... 
Remove Rows if they are all zeros\n\"\"\"\n## Remove Rows full of Zeros \ndef test_remove_zeros(self):\n self.assertEqual(remove_rows_zeros([[1, 0, 1, 0],\n [1, 1, 1, 0],\n [0, 1, 0, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0]]),\n [[1, 0, 1, 0],\n [1, 1, 1, 0],\n [0, 1, 0, 1]])\n\n self.assertEqual(remove_rows_zeros([[1, 0, 0],\n [0, 1, 1],\n [0, 0, 1],\n [0, 0, 0]]),\n [[1, 0, 0],\n [0, 1, 1],\n [0, 0, 1]])\n\n self.assertEqual(remove_rows_zeros([[0, 1, 1],\n [1, 0, 0],\n [0, 0, 0]]),\n [[0, 1, 1],\n [1, 0, 0]])\n\n\n\"\"\" \nCheck Satisfiability/Conflict wrt the augmented column. \nReturn True if conflict (It must exist an empty odd equation)\n\"\"\"\n## Check SATISFIABILITY\ndef test_check_sat(self):\n self.assertEqual(check_sat([[1, 0, 1, 0],\n [1, 1, 1, 0],\n [0, 1, 0, 1],\n [0, 0, 0, 1],\n [0, 0, 0, 0]]),True)\n\n self.assertEqual(check_sat([[1, 0, 0],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0]]),False)\n\n self.assertEqual(check_sat([[1, 0, 1],\n [0, 1, 0],\n [0, 0, 1]]),True)\n\n self.assertEqual(check_sat([[1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 1],\n [0, 0, 1, 0, 0, 1],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 1]]),False)\n\n\n\"\"\"\nGauss-Jordan Elimination Tests\n\nThe second parameter in the solve function is a flag.\nIf True, it will display the GJ Elimination Procedure\n\n\"\"\"\n\n## No GJE due matrix size. Return the same matrix to check SAT\ndef test_no_gje(self):\n self.assertEqual(solve_gje([[1, 0],\n [1, 1],\n [1, 0]],False),\n [[1, 0],\n [1, 1],\n [1, 0]])\n\n self.assertEqual(solve_gje([[1, 0],\n [0, 1]],False),\n [[1, 0],\n [0, 1]])\n \n # solve_gje_\n self.assertEqual(solve_gje_([[1, 0],\n [1, 1],\n [1, 0]],False),\n [[1, 0],\n [1, 1],\n [1, 0]])\n\n self.assertEqual(solve_gje_([[1, 0],\n [0, 1]],False),\n [[1, 0],\n [0, 1]])\n\n## More Columns than Rows\ndef test_more_cols(self):\n self.assertEqual(solve_gje([[0, 1, 1, 0, 0],\n [0, 1, 1, 0, 0],\n [1, 0, 0, 1, 0]],False),\n [[1, 0, 0, 1, 0],\n [0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0]])\n\n self.assertEqual(solve_gje([[0, 1, 1, 0],\n [0, 1, 1, 0],\n [1, 0, 0, 0]],False),\n [[1, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]])\n\n self.assertEqual(solve_gje([[1, 0, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 0]],False),\n [[1, 0, 1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 1, 0]])\n\n self.assertEqual(solve_gje([[0, 1, 0, 0, 0, 0, 0, 1],\n [0, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 1, 0],\n [0, 1, 0, 0, 0, 0, 1, 0],\n [1, 0, 0, 1, 0, 0, 0, 0]],False),\n [[1, 0, 0, 0, 1, 0, 0, 1],\n [0, 1, 0, 0, 0, 0, 0, 1],\n [0, 0, 1, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje([[1, 0, 1, 0, 1, 1, 0, 0],\n [1, 1, 1, 0, 0, 0, 1, 1],\n [0, 0, 1, 0, 1, 0, 0, 1],\n [0, 1, 0, 1, 0, 1, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 0]],False),\n [[1, 0, 0, 0, 0, 1, 0, 1],\n [0, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 1]])\n\n # solve_gje_\n self.assertEqual(solve_gje_([[0, 1, 1, 0, 0],\n [0, 1, 1, 0, 0],\n [1, 0, 0, 1, 0]],False).tolist(),\n [[1, 0, 0, 1, 0],\n [0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0]])\n\n self.assertEqual(solve_gje_([[0, 1, 1, 0],\n [0, 1, 1, 0],\n [1, 0, 0, 0]],False).tolist(),\n [[1, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]])\n\n self.assertEqual(solve_gje_([[1, 0, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 
1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 0]],False).tolist(),\n [[1, 0, 1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 1, 0]])\n\n self.assertEqual(solve_gje_([[0, 1, 0, 0, 0, 0, 0, 1],\n [0, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 1, 0],\n [0, 1, 0, 0, 0, 0, 1, 0],\n [1, 0, 0, 1, 0, 0, 0, 0]],False).tolist(),\n [[1, 0, 0, 0, 1, 0, 0, 1],\n [0, 1, 0, 0, 0, 0, 0, 1],\n [0, 0, 1, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje_([[1, 0, 1, 0, 1, 1, 0, 0],\n [1, 1, 1, 0, 0, 0, 1, 1],\n [0, 0, 1, 0, 1, 0, 0, 1],\n [0, 1, 0, 1, 0, 1, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 0]],False).tolist(),\n [[1, 0, 0, 0, 0, 1, 0, 1],\n [0, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 1]])\n\n## Square Matrix\ndef test_square(self):\n self.assertEqual(solve_gje([[1, 0, 1, 0, 1, 0],\n [1, 1, 1, 0, 0, 1],\n [0, 0, 1, 0, 1, 1],\n [0, 1, 0, 1, 0, 0],\n [0, 0, 0, 1, 0, 0]],False),\n [[1, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje([[1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0],\n [0, 1, 0, 0, 1],\n [0, 0, 1, 1, 0]],False),\n [[1, 0, 0, 0, 1],\n [0, 1, 0, 0, 1],\n [0, 0, 1, 0, 1],\n [0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje([[1, 1, 1, 1],\n [1, 0, 1, 0],\n [0, 0, 1, 0]],False),\n [[1, 0, 0, 0],\n [0, 1, 0, 1],\n [0, 0, 1, 0]])\n\n self.assertEqual(solve_gje([[0, 0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0, 1],\n [1, 0, 1, 1, 1, 1],\n [0, 1, 0, 1, 0, 0],\n [1, 0, 0, 1, 0, 1]],False),\n [[1, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 1],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje([[1, 1, 1],\n [1, 0, 1]],False),\n [[1, 0, 1],\n [0, 1, 0]])\n\n # solve_gje_\n self.assertEqual(solve_gje_([[1, 0, 1, 0, 1, 0],\n [1, 1, 1, 0, 0, 1],\n [0, 0, 1, 0, 1, 1],\n [0, 1, 0, 1, 0, 0],\n [0, 0, 0, 1, 0, 0]],False).tolist(),\n [[1, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje_([[1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0],\n [0, 1, 0, 0, 1],\n [0, 0, 1, 1, 0]],False).tolist(),\n [[1, 0, 0, 0, 1],\n [0, 1, 0, 0, 1],\n [0, 0, 1, 0, 1],\n [0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje_([[1, 1, 1, 1],\n [1, 0, 1, 0],\n [0, 0, 1, 0]],False).tolist(),\n [[1, 0, 0, 0],\n [0, 1, 0, 1],\n [0, 0, 1, 0]])\n\n self.assertEqual(solve_gje_([[0, 0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0, 1],\n [1, 0, 1, 1, 1, 1],\n [0, 1, 0, 1, 0, 0],\n [1, 0, 0, 1, 0, 1]],False).tolist(),\n [[1, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 1],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje_([[1, 1, 1],\n [1, 0, 1]],False).tolist(),\n [[1, 0, 1],\n [0, 1, 0]])\n \n\n\n## More Rows than Columns\ndef test_more_rows(self):\n self.assertEqual(solve_gje([[1, 0, 1, 0],\n [1, 1, 1, 0],\n [0, 1, 0, 1],\n [0, 0, 1, 0],\n [0, 1, 0, 1]],False),\n [[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 1]])\n\n self.assertEqual(solve_gje([[0, 1, 0],\n [0, 1, 1],\n [1, 0, 0],\n [1, 1, 0]],False),\n [[1, 0, 0],\n [0, 1, 1],\n [0, 0, 1],\n [0, 0, 1]])\n\n self.assertEqual(solve_gje([[0, 1, 1],\n [1, 0, 0],\n [0, 0, 0]],False),\n [[1, 0, 0],\n [0, 1, 1]])\n\n # solve_gje_\n self.assertEqual(solve_gje_([[1, 0, 1, 0],\n [1, 1, 1, 0],\n [0, 1, 0, 1],\n [0, 0, 1, 0],\n [0, 
1, 0, 1]],False).tolist(),\n [[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 1]])\n\n self.assertEqual(solve_gje_([[0, 1, 0],\n [0, 1, 1],\n [1, 0, 0],\n [1, 1, 0]],False).tolist(),\n [[1, 0, 0],\n [0, 1, 1],\n [0, 0, 1],\n [0, 0, 1]])\n\n self.assertEqual(solve_gje_([[0, 1, 1],\n [1, 0, 0]],False).tolist(),\n [[1, 0, 0],\n [0, 1, 1]])\n\n\ndef test_incremental_reduce(self):\n mm = simplex.Matrix([[1, 0, 0, 1],\n [1, 1, 1, 1],\n [0, 0, 1, 0]])\n self.assertEqual(mm.__reduce__(0,0),\n ([1],[2]))\n\n\n mm = simplex.Matrix([[1, 0, 0, 1],\n [0, 1, 1, 0],\n [0, 0, 1, 0]])\n self.assertEqual(mm.__reduce__(2,2),\n ([1],[0]))\n \n mm = simplex.Matrix([[1, 0, 0, 1, 1, 1],\n [0, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 0]])\n self.assertEqual(mm.__reduce__(4,0),\n ([1,2],[]))\n\n \n mm = simplex.Matrix([[1, 0, 0, 1, 1, 0, 1],\n [0, 1, 0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0, 1, 0]])\n self.assertEqual(mm.__reduce__(5,1),\n ([2],[0]))\n\n\ndef test_remove_row(self):\n mm = simplex.Matrix([[1, 0, 0, 1],\n [1, 1, 1, 1],\n [0, 0, 1, 0]])\n self.assertEqual(mm.__remove_row__([1, 0, 0, 1]),\n [[1, 1, 1, 1],\n [0, 0, 1, 0]])\n\n\n mm = simplex.Matrix([[1, 0, 0, 1],\n [0, 1, 1, 0],\n [0, 0, 1, 0]])\n self.assertEqual(mm.__remove_row__([0, 1, 1, 0]),\n [[1, 0, 0, 1],\n [0, 0, 1, 0]])\n\n \n mm = simplex.Matrix([[1, 0, 0, 1, 1, 1],\n [0, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 0]])\n self.assertEqual(mm.__remove_row__([0, 1, 1, 1, 1, 0]),\n [[1, 0, 0, 1, 1, 1],\n [0, 0, 1, 1, 1, 0]])\n\n \n mm = simplex.Matrix([[1, 0, 0, 1, 1, 0, 1],\n [0, 1, 0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0, 1, 0]])\n self.assertEqual(mm.__remove_row__([0, 0, 1, 1, 0, 1, 0]),\n [[1, 0, 0, 1, 1, 0, 1],\n [0, 1, 0, 0, 1, 1, 0]])\n\ndef test_remove_col(self):\n mm = simplex.Matrix([[1, 0, 0, 1, 1, 0, 1],\n [0, 1, 0, 0, 1, 1, 0]])\n self.assertEqual(mm.__remove_col__(2),\n [[1, 0, 1, 1, 0, 1],\n [0, 1, 0, 1, 1, 0]])\n"
] | [
[
"numpy.array"
]
] |
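The test file above exercises solve_gje/solve_gje_, and the expected outputs show the routine is Gauss-Jordan elimination over GF(2): rows combine by XOR, with the last column acting as the augmented right-hand side. Below is a minimal sketch of that reduction, assuming only what the tests imply — the name gje_gf2 and its exact pivoting rules belong to this sketch, not to the simplex module itself.

import numpy as np

def gje_gf2(matrix):
    """Reduced row echelon form of a 0/1 matrix under XOR (GF(2)) arithmetic."""
    m = np.array(matrix, dtype=np.uint8) % 2
    rows, cols = m.shape
    pivot = 0
    for col in range(cols):
        if pivot >= rows:
            break
        hits = np.nonzero(m[pivot:, col])[0]
        if hits.size == 0:
            continue  # no pivot available in this column
        swap = pivot + hits[0]
        m[[pivot, swap]] = m[[swap, pivot]]  # bring the pivot row into place
        for r in range(rows):
            if r != pivot and m[r, col]:
                m[r] ^= m[pivot]  # XOR-eliminate this column from every other row
        pivot += 1
    return m

# Matches one of the square-matrix cases tested above:
print(gje_gf2([[1, 1, 1], [1, 0, 1]]).tolist())  # [[1, 0, 1], [0, 1, 0]]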
sIncerass/nums | [
"57c4d8f67c31c6215dea1ede07e8c0f063c68a6b"
] | [
"nums/core/array/blockarray.py"
] | [
"# coding=utf-8\n# Copyright (C) 2020 NumS Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport itertools\n\nimport numpy as np\n\nfrom nums.core.array import selection\nfrom nums.core.array import utils as array_utils\nfrom nums.core.array.base import BlockArrayBase, Block\nfrom nums.core.array.view import ArrayView\nfrom nums.core.grid.grid import ArrayGrid\nfrom nums.core.compute.compute_manager import ComputeManager\n\n\nclass BlockArray(BlockArrayBase):\n @classmethod\n def empty(cls, shape, block_shape, dtype, cm: ComputeManager):\n grid = ArrayGrid(shape=shape, block_shape=block_shape, dtype=dtype.__name__)\n grid_meta = grid.to_meta()\n arr = BlockArray(grid, cm)\n for grid_entry in grid.get_entry_iterator():\n arr.blocks[grid_entry].oid = cm.empty(\n grid_entry,\n grid_meta,\n syskwargs={\"grid_entry\": grid_entry, \"grid_shape\": grid.grid_shape},\n )\n return arr\n\n @classmethod\n def from_scalar(cls, val, cm):\n if not array_utils.is_scalar(val):\n raise ValueError(\"%s is not a scalar.\" % val)\n return BlockArray.from_np(np.array(val), block_shape=(), copy=False, cm=cm)\n\n @classmethod\n def from_oid(cls, oid, shape, dtype, cm):\n block_shape = shape\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n ba = BlockArray(grid, cm)\n for i, grid_entry in enumerate(grid.get_entry_iterator()):\n assert i == 0\n ba.blocks[grid_entry].oid = oid\n return ba\n\n @classmethod\n def from_np(cls, arr, block_shape, copy, cm):\n dtype_str = str(arr.dtype)\n grid = ArrayGrid(arr.shape, block_shape, dtype_str)\n rarr = BlockArray(grid, cm)\n grid_entry_iterator = grid.get_entry_iterator()\n for grid_entry in grid_entry_iterator:\n grid_slice = grid.get_slice(grid_entry)\n block = arr[grid_slice]\n if copy:\n block = np.copy(block)\n rarr.blocks[grid_entry].oid = cm.put(block)\n rarr.blocks[grid_entry].dtype = getattr(np, dtype_str)\n return rarr\n\n @classmethod\n def from_blocks(cls, arr: np.ndarray, result_shape, cm):\n sample_idx = tuple(0 for dim in arr.shape)\n if isinstance(arr, Block):\n sample_block = arr\n result_shape = ()\n else:\n sample_block = arr[sample_idx]\n if result_shape is None:\n result_shape = array_utils.shape_from_block_array(arr)\n result_block_shape = sample_block.shape\n result_dtype_str = sample_block.dtype.__name__\n result_grid = ArrayGrid(\n shape=result_shape, block_shape=result_block_shape, dtype=result_dtype_str\n )\n assert arr.shape == result_grid.grid_shape\n result = BlockArray(result_grid, cm)\n for grid_entry in result_grid.get_entry_iterator():\n if isinstance(arr, Block):\n block: Block = arr\n else:\n block: Block = arr[grid_entry]\n result.blocks[grid_entry] = block\n return result\n\n def copy(self):\n grid_copy = self.grid.from_meta(self.grid.to_meta())\n rarr_copy = BlockArray(grid_copy, self.cm)\n for grid_entry in grid_copy.get_entry_iterator():\n rarr_copy.blocks[grid_entry] = self.blocks[grid_entry].copy()\n return rarr_copy\n\n def touch(self):\n \"\"\"\n \"Touch\" an array. 
This is an efficient distributed \"wait\" operation.\n \"\"\"\n oids = []\n for grid_entry in self.grid.get_entry_iterator():\n block: Block = self.blocks[grid_entry]\n oids.append(\n self.cm.touch(\n block.oid,\n syskwargs={\n \"grid_entry\": block.grid_entry,\n \"grid_shape\": block.grid_shape,\n },\n )\n )\n self.cm.get(oids)\n return self\n\n def reshape(self, *shape, **kwargs):\n block_shape = kwargs.get(\"block_shape\", None)\n if array_utils.is_int(shape):\n shape = (shape,)\n elif len(shape) == 0:\n shape = self.shape\n elif isinstance(shape[0], (tuple, list)):\n assert len(shape) == 1\n shape = shape[0]\n else:\n assert all(np.issubdtype(type(n), int) for n in shape)\n shape = Reshape.compute_shape(self.shape, shape)\n if block_shape is None:\n if shape == self.shape:\n # This is a noop.\n block_shape = self.block_shape\n else:\n block_shape = self.cm.get_block_shape(shape, self.dtype)\n return Reshape()(self, shape, block_shape)\n\n def expand_dims(self, axis):\n \"\"\"\n This function refers to the numpy implementation of expand_dims.\n \"\"\"\n if type(axis) not in (tuple, list):\n axis = (axis,)\n out_ndim = len(axis) + self.ndim\n axis = np.core.numeric.normalize_axis_tuple(axis, out_ndim)\n\n shape_it = iter(self.shape)\n block_shape_it = iter(self.block_shape)\n shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]\n block_shape = [\n 1 if ax in axis else next(block_shape_it) for ax in range(out_ndim)\n ]\n return self.reshape(shape, block_shape=block_shape)\n\n def squeeze(self):\n shape = self.shape\n block_shape = self.block_shape\n new_shape = []\n new_block_shape = []\n for s, b in zip(shape, block_shape):\n if s == 1:\n assert b == 1\n continue\n new_shape.append(s)\n new_block_shape.append(b)\n return self.reshape(new_shape, block_shape=new_block_shape)\n\n def swapaxes(self, axis1, axis2):\n meta_swap = self.grid.to_meta()\n shape = list(meta_swap[\"shape\"])\n block_shape = list(meta_swap[\"block_shape\"])\n dim = len(shape)\n if axis1 >= dim or axis2 >= dim:\n raise ValueError(\"axis is larger than the array dimension\")\n shape[axis1], shape[axis2] = shape[axis2], shape[axis1]\n block_shape[axis1], block_shape[axis2] = block_shape[axis2], block_shape[axis1]\n meta_swap[\"shape\"] = tuple(shape)\n meta_swap[\"block_shape\"] = tuple(block_shape)\n grid_swap = ArrayGrid.from_meta(meta_swap)\n rarr_src = np.ndarray(self.blocks.shape, dtype=\"O\")\n\n for grid_entry in self.grid.get_entry_iterator():\n rarr_src[grid_entry] = self.blocks[grid_entry].swapaxes(axis1, axis2)\n rarr_src = rarr_src.swapaxes(axis1, axis2)\n\n rarr_swap = BlockArray(grid_swap, self.cm, rarr_src)\n return rarr_swap\n\n def __getattr__(self, item):\n if item == \"__array_priority__\" or item == \"__array_struct__\":\n # This is triggered by a numpy array on the LHS.\n raise ValueError(\"Unable to covert numpy array to block array.\")\n elif item == \"ndim\":\n return len(self.shape)\n elif item == \"T\":\n metaT = self.grid.to_meta()\n metaT[\"shape\"] = tuple(reversed(metaT[\"shape\"]))\n metaT[\"block_shape\"] = tuple(reversed(metaT[\"block_shape\"]))\n gridT = ArrayGrid.from_meta(metaT)\n rarrT = BlockArray(gridT, self.cm)\n rarrT.blocks = np.copy(self.blocks.T)\n for grid_entry in rarrT.grid.get_entry_iterator():\n rarrT.blocks[grid_entry] = rarrT.blocks[grid_entry].transpose()\n return rarrT\n else:\n raise NotImplementedError(item)\n\n def __getitem__(self, item):\n if not isinstance(item, tuple):\n ss = (item,)\n else:\n ss = item\n # We need to fetch any block 
arrays.\n tmp = []\n for entry in ss:\n if isinstance(entry, BlockArray):\n tmp.append(entry.get())\n else:\n tmp.append(entry)\n ss = tuple(tmp)\n is_handled_advanced = True\n if len(ss) > 1:\n # Check if all entries are full slices except the last entry.\n for entry in ss[:-1]:\n is_handled_advanced = is_handled_advanced and (\n isinstance(entry, slice)\n and entry.start is None\n and entry.stop is None\n )\n if is_handled_advanced and array_utils.is_array_like(ss[-1]):\n # Treat this as a shuffle.\n return self._advanced_single_array_subscript(\n sel=(ss[-1],), axis=len(ss) - 1\n )\n\n av: ArrayView = ArrayView.from_block_array(self)\n # TODO (hme): We don't have to create, but do so for now until we need to optimize.\n return av[item].create(BlockArray)\n\n def _advanced_single_array_subscript(self, sel: tuple, block_size=None, axis=0):\n def group_by_block(\n dst_grid_entry,\n dst_slice_tuples,\n src_grid,\n dst_index_list,\n src_index_list,\n axis=0,\n ):\n # Block grid entries needed to write to given dst_slice_selection.\n src_blocks = {}\n dst_slice_np = np.array(dst_slice_tuples).T\n dst_index_arr = np.array(dst_index_list)\n src_index_arr = np.array(src_index_list)\n # Pick the smallest type to represent indices.\n # A set of these indices may be transmitted over the network,\n # so we want to pick the smallest encoding possible.\n index_types = [\n (2 ** 8, np.uint8),\n (2 ** 16, np.uint16),\n (2 ** 32, np.uint32),\n (2 ** 64, np.uint64),\n ]\n index_type = None\n for bound, curr_index_type in index_types:\n if np.all(np.array(src_grid.block_shape[axis]) < bound) and np.all(\n dst_slice_np[1][axis] < bound\n ):\n index_type = curr_index_type\n break\n if index_type is None:\n raise Exception(\"Unable to encode block indices, blocks are too large.\")\n dst_entry_test = list(dst_grid_entry[:axis]) + list(\n dst_grid_entry[axis + 1 :]\n )\n num_pairs_check = 0\n for grid_entry in src_grid.get_entry_iterator():\n # Must match on every entry except axis.\n src_entry_test = list(grid_entry[:axis]) + list(grid_entry[axis + 1 :])\n if dst_entry_test != src_entry_test:\n # Skip this block.\n continue\n src_slice_np = np.array(src_grid.get_slice_tuples(grid_entry)).T\n index_pairs = []\n for i in range(src_index_arr.shape[0]):\n src_index = src_index_arr[i]\n dst_index = dst_index_arr[i]\n if np.all(\n (src_slice_np[0][axis] <= src_index)\n & (src_index < src_slice_np[1][axis])\n ):\n index_pair = (\n np.array(\n dst_index - dst_slice_np[0][axis], dtype=index_type\n ),\n np.array(\n src_index - src_slice_np[0][axis], dtype=index_type\n ),\n )\n index_pairs.append(index_pair)\n num_pairs_check += 1\n if len(index_pairs) > 0:\n src_blocks[grid_entry] = index_pairs\n assert num_pairs_check == len(dst_index_list)\n return src_blocks\n\n array = sel[0]\n assert len(array.shape) == 1\n assert np.all(0 <= array) and np.all(array < self.shape[axis])\n if block_size is None:\n block_size = self.block_shape[axis]\n axis_dim = len(array)\n shape = tuple(\n list(self.shape[:axis]) + [axis_dim] + list(self.shape[axis + 1 :])\n )\n block_shape = tuple(\n list(self.block_shape[:axis])\n + [block_size]\n + list(self.block_shape[axis + 1 :])\n )\n dst_arr = BlockArray.empty(\n shape=shape, block_shape=block_shape, dtype=self.dtype, cm=self.cm\n )\n\n for dst_grid_entry in dst_arr.grid.get_entry_iterator():\n dst_block: Block = dst_arr.blocks[dst_grid_entry]\n dst_slice_selection = dst_arr.grid.get_slice(dst_grid_entry)\n dst_index_array = selection.slice_to_range(\n dst_slice_selection[axis], 
shape[axis]\n )\n src_index_array = array[dst_slice_selection[axis]]\n assert len(dst_index_array) == len(src_index_array)\n # Can this be sped up by grouping all src blocks outside of this loop?\n src_blocks = group_by_block(\n dst_grid_entry,\n dst_arr.grid.get_slice_tuples(dst_grid_entry),\n self.grid,\n dst_index_array,\n src_index_array,\n axis,\n )\n for src_grid_entry in src_blocks:\n src_block: Block = self.blocks[src_grid_entry]\n index_pairs = src_blocks[src_grid_entry]\n syskwargs = {\n \"grid_entry\": dst_grid_entry,\n \"grid_shape\": dst_arr.grid.grid_shape,\n }\n dst_block.oid = self.cm.update_block_along_axis(\n dst_block.oid, src_block.oid, index_pairs, axis, syskwargs=syskwargs\n )\n return dst_arr\n\n def __setitem__(self, key, value):\n av: ArrayView = ArrayView.from_block_array(self)\n av[key] = value\n\n @staticmethod\n def to_block_array(obj, cm: ComputeManager, block_shape=None):\n if isinstance(obj, BlockArray):\n return obj\n if isinstance(obj, np.ndarray):\n np_array = obj\n elif isinstance(obj, list):\n np_array = np.array(obj)\n elif array_utils.is_scalar(obj):\n return BlockArray.from_scalar(obj, cm)\n else:\n raise Exception(\"Unsupported type %s\" % type(obj))\n if block_shape is None:\n block_shape = cm.get_block_shape(np_array.shape, np_array.dtype)\n return BlockArray.from_np(np_array, block_shape, False, cm)\n\n def check_or_convert_other(self, other, compute_block_shape=False):\n block_shape = None if compute_block_shape else self.block_shape\n return BlockArray.to_block_array(other, self.cm, block_shape=block_shape)\n\n def ufunc(self, op_name):\n result = self.copy()\n for grid_entry in self.grid.get_entry_iterator():\n result.blocks[grid_entry] = self.blocks[grid_entry].ufunc(op_name)\n return result\n\n def _tree_reduce(\n self, op_name, blocks_or_oids, result_grid_entry, result_grid_shape\n ):\n \"\"\"\n Basic tree reduce imp.\n Schedules op on same node as left operand.\n :param op_name: The reduction op.\n :param blocks_or_oids: A list of type Block or a list of tuples.\n Tuples must be of the form\n (oid, grid_entry, grid_shape, transposed)\n :param result_grid_entry: The grid entry of the result block. This will be used\n to compute the final reduction step.\n :param result_grid_shape: The grid entry of the result block. 
This will be used\n to compute the final reduction step.\n :return: The oid of the result.\n \"\"\"\n oid_list = blocks_or_oids\n if isinstance(blocks_or_oids[0], Block):\n oid_list = [\n (b.oid, b.grid_entry, b.grid_shape, b.transposed)\n for b in blocks_or_oids\n ]\n if len(oid_list) == 1:\n return oid_list[0][0]\n q = oid_list\n while len(q) > 1:\n a_oid, a_ge, a_gs, a_T = q.pop(0)\n b_oid, _, _, b_T = q.pop(0)\n ge, gs = (\n (result_grid_entry, result_grid_shape) if len(q) == 0 else (a_ge, a_gs)\n )\n c_oid = self.cm.bop_reduce(\n op_name,\n a_oid,\n b_oid,\n a_T,\n b_T,\n syskwargs={\n \"grid_entry\": ge,\n \"grid_shape\": gs,\n },\n )\n q.append((c_oid, ge, gs, False))\n r_oid, r_ge, r_gs, _ = q.pop(0)\n assert r_ge == result_grid_entry\n assert r_gs == result_grid_shape\n return r_oid\n\n def reduce_axis(self, op_name, axis, keepdims=False):\n if not (axis is None or isinstance(axis, (int, np.int32, np.int64))):\n raise NotImplementedError(\"Only integer axis is currently supported.\")\n block_reduced_oids = np.empty_like(self.blocks, dtype=tuple)\n for grid_entry in self.grid.get_entry_iterator():\n block = self.blocks[grid_entry]\n block_oid = self.cm.reduce_axis(\n op_name=op_name,\n arr=block.oid,\n axis=axis,\n keepdims=keepdims,\n transposed=block.transposed,\n syskwargs={\n \"grid_entry\": block.grid_entry,\n \"grid_shape\": block.grid_shape,\n },\n )\n block_reduced_oids[grid_entry] = (\n block_oid,\n block.grid_entry,\n block.grid_shape,\n False,\n )\n result_shape = []\n result_block_shape = []\n for curr_axis in range(len(self.shape)):\n axis_size, axis_block_size = (\n self.shape[curr_axis],\n self.block_shape[curr_axis],\n )\n if curr_axis == axis or axis is None:\n if keepdims:\n axis_size, axis_block_size = 1, 1\n else:\n continue\n result_shape.append(axis_size)\n result_block_shape.append(axis_block_size)\n result_shape = tuple(result_shape)\n result_block_shape = tuple(result_block_shape)\n result_dtype = array_utils.get_reduce_output_type(op_name, self.dtype)\n result_grid = ArrayGrid(\n shape=result_shape,\n block_shape=result_block_shape,\n dtype=result_dtype.__name__,\n )\n result = BlockArray(result_grid, self.cm)\n\n if axis is None:\n if result.shape == ():\n result_block: Block = result.blocks[()]\n else:\n result_block: Block = result.blocks[:].item()\n result_block.oid = self._tree_reduce(\n op_name,\n block_reduced_oids.flatten().tolist(),\n result_block.grid_entry,\n result_block.grid_shape,\n )\n else:\n for result_grid_entry in result_grid.get_entry_iterator():\n block_reduced_oids_axis = []\n for sum_dim in range(self.grid.grid_shape[axis]):\n grid_entry = list(result_grid_entry)\n if keepdims:\n grid_entry[axis] = sum_dim\n else:\n grid_entry = grid_entry[:axis] + [sum_dim] + grid_entry[axis:]\n grid_entry = tuple(grid_entry)\n block_reduced_oids_axis.append(block_reduced_oids[grid_entry])\n result_block: Block = result.blocks[result_grid_entry]\n result_block.oid = self._tree_reduce(\n op_name,\n block_reduced_oids_axis,\n result_block.grid_entry,\n result_block.grid_shape,\n )\n return result\n\n def __matmul__(self, other):\n if len(self.shape) > 2:\n # TODO (bcp): NumPy's implementation does a stacked matmul, which is not supported yet.\n raise NotImplementedError(\n \"Matrix multiply for tensors of rank > 2 not supported yet.\"\n )\n else:\n return self.tensordot(other, 1)\n\n def _compute_tensordot_syskwargs(self, self_block: Block, other_block: Block):\n # Schedule on larger block.\n if np.product(self_block.shape) >= 
np.product(other_block.shape):\n return self_block.true_grid_entry(), self_block.true_grid_shape()\n else:\n return other_block.true_grid_entry(), other_block.true_grid_shape()\n\n def tensordot(self, other, axes=2):\n if not isinstance(other, BlockArray):\n raise ValueError(\n \"Cannot automatically construct BlockArray for tensor operations.\"\n )\n\n if isinstance(axes, int):\n pass\n elif array_utils.is_array_like(axes):\n raise NotImplementedError(\"Non-integer axes is currently not supported.\")\n else:\n raise TypeError(f\"Unexpected axes type '{type(axes).__name__}'\")\n\n if array_utils.np_tensordot_param_test(\n self.shape, self.ndim, other.shape, other.ndim, axes\n ):\n raise ValueError(\"shape-mismatch for sum\")\n\n other = self.check_or_convert_other(other, compute_block_shape=True)\n\n this_axes = self.grid.grid_shape[:-axes]\n this_sum_axes = self.grid.grid_shape[-axes:]\n other_axes = other.grid.grid_shape[axes:]\n other_sum_axes = other.grid.grid_shape[:axes]\n assert this_sum_axes == other_sum_axes\n result_shape = tuple(self.shape[:-axes] + other.shape[axes:])\n result_block_shape = tuple(self.block_shape[:-axes] + other.block_shape[axes:])\n result_grid = ArrayGrid(\n shape=result_shape,\n block_shape=result_block_shape,\n dtype=array_utils.get_bop_output_type(\n \"tensordot\", self.dtype, other.dtype\n ).__name__,\n )\n assert result_grid.grid_shape == tuple(this_axes + other_axes)\n result = BlockArray(result_grid, self.cm)\n this_dims = list(itertools.product(*map(range, this_axes)))\n other_dims = list(itertools.product(*map(range, other_axes)))\n sum_dims = list(itertools.product(*map(range, this_sum_axes)))\n for i in this_dims:\n for j in other_dims:\n grid_entry = tuple(i + j)\n result_block: Block = result.blocks[grid_entry]\n sum_oids = []\n for k in sum_dims:\n self_block: Block = self.blocks[tuple(i + k)]\n other_block: Block = other.blocks[tuple(k + j)]\n dot_grid_args = self._compute_tensordot_syskwargs(\n self_block, other_block\n )\n dotted_oid = self.cm.bop(\n \"tensordot\",\n self_block.oid,\n other_block.oid,\n self_block.transposed,\n other_block.transposed,\n axes=axes,\n syskwargs={\n \"grid_entry\": dot_grid_args[0],\n \"grid_shape\": dot_grid_args[1],\n },\n )\n sum_oids.append(\n (dotted_oid, dot_grid_args[0], dot_grid_args[1], False)\n )\n result_block.oid = self._tree_reduce(\n \"sum\", sum_oids, result_block.grid_entry, result_block.grid_shape\n )\n return result\n\n def _fast_element_wise(self, op_name, other):\n \"\"\"\n Implements fast scheduling for basic element-wise operations.\n \"\"\"\n # Schedule the op first.\n blocks = np.empty(shape=self.grid.grid_shape, dtype=Block)\n for grid_entry in self.grid.get_entry_iterator():\n self_block: Block = self.blocks[grid_entry]\n other_block: Block = other.blocks[grid_entry]\n blocks[grid_entry] = block = Block(\n grid_entry=grid_entry,\n grid_shape=self_block.grid_shape,\n rect=self_block.rect,\n shape=self_block.shape,\n dtype=self_block.dtype,\n transposed=False,\n cm=self.cm,\n )\n block.oid = self.cm.bop(\n op_name,\n self_block.oid,\n other_block.oid,\n self_block.transposed,\n other_block.transposed,\n axes={},\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": self.grid.grid_shape,\n },\n )\n return BlockArray(self.grid.copy(), self.cm, blocks=blocks)\n\n def __add__(self, other):\n other = self.check_or_convert_other(other)\n if self.shape == other.shape:\n return self._fast_element_wise(\"add\", other)\n return BlockArray.from_blocks(\n self.blocks + other.blocks, 
result_shape=None, cm=self.cm\n )\n\n def __sub__(self, other):\n other = self.check_or_convert_other(other)\n return BlockArray.from_blocks(\n self.blocks - other.blocks, result_shape=None, cm=self.cm\n )\n\n def __mul__(self, other):\n other = self.check_or_convert_other(other)\n return BlockArray.from_blocks(\n self.blocks * other.blocks, result_shape=None, cm=self.cm\n )\n\n def __truediv__(self, other):\n other = self.check_or_convert_other(other)\n return BlockArray.from_blocks(\n self.blocks / other.blocks, result_shape=None, cm=self.cm\n )\n\n def __pow__(self, other):\n other = self.check_or_convert_other(other)\n return BlockArray.from_blocks(\n self.blocks ** other.blocks, result_shape=None, cm=self.cm\n )\n\n def __invert__(self):\n return self.ufunc(\"invert\")\n\n __iadd__ = __add__\n __isub__ = __sub__\n __imul__ = __mul__\n __imatmul__ = __matmul__\n __itruediv__ = __truediv__\n __ipow__ = __pow__\n\n # TODO (hme): Type check bool ops.\n def __bool__(self):\n # pylint: disable=no-member\n dtype = self.dtype\n if isinstance(dtype, type):\n # TODO (hme): Fix this strange issue.\n dtype = dtype()\n if isinstance(dtype, (bool, np.bool)) and np.sum(self.shape) == len(self.shape):\n return self.get().__bool__()\n return True\n\n def __inequality__(self, op, other):\n other = self.check_or_convert_other(other)\n assert (\n other.shape == () or other.shape == self.shape\n ), \"Currently supports comparison with scalars only.\"\n shape = array_utils.broadcast(self.shape, other.shape).shape\n block_shape = array_utils.broadcast_block_shape(\n self.shape, other.shape, self.block_shape\n )\n dtype = bool.__name__\n grid = ArrayGrid(shape, block_shape, dtype)\n result = BlockArray(grid, self.cm)\n for grid_entry in result.grid.get_entry_iterator():\n if other.shape == ():\n other_block: Block = other.blocks.item()\n else:\n other_block: Block = other.blocks[grid_entry]\n result.blocks[grid_entry] = self.blocks[grid_entry].bop(\n op, other_block, args={}\n )\n\n return result\n\n def __ge__(self, other):\n return self.__inequality__(\"ge\", other)\n\n def __gt__(self, other):\n return self.__inequality__(\"gt\", other)\n\n def __le__(self, other):\n return self.__inequality__(\"le\", other)\n\n def __lt__(self, other):\n return self.__inequality__(\"lt\", other)\n\n def __eq__(self, other):\n return self.__inequality__(\"eq\", other)\n\n def __ne__(self, other):\n return self.__inequality__(\"ne\", other)\n\n __radd__ = __add__\n\n def __rsub__(self, other):\n other = self.check_or_convert_other(other)\n return other - self\n\n __rmul__ = __mul__\n\n def __rmatmul__(self, other):\n other = self.check_or_convert_other(other)\n return other @ self\n\n def __rtruediv__(self, other):\n other = self.check_or_convert_other(other)\n return other / self\n\n def __rpow__(self, other):\n other = self.check_or_convert_other(other)\n return other ** self\n\n def __neg__(self):\n return -1 * self\n\n def __pos__(self):\n return self\n\n def astype(self, dtype):\n grid = ArrayGrid(self.shape, self.block_shape, dtype.__name__)\n result = BlockArray(grid, self.cm)\n for grid_entry in result.grid.get_entry_iterator():\n result.blocks[grid_entry] = self.blocks[grid_entry].astype(dtype)\n return result\n\n def flattened_oids(self):\n oids = []\n for grid_entry in self.grid.get_entry_iterator():\n oid = self.blocks[grid_entry].oid\n oids.append(oid)\n return oids\n\n\nclass Reshape(object):\n @staticmethod\n def compute_shape(shape, input_shape):\n size = np.product(shape)\n if -1 in input_shape:\n 
new_shape = []\n other_dim_prod = 1\n negative_one_seen = False\n for dim in input_shape:\n if dim == -1:\n if negative_one_seen:\n raise Exception(\"Only one -1 permitted in reshape.\")\n negative_one_seen = True\n continue\n other_dim_prod *= dim\n if size % other_dim_prod != 0:\n raise Exception(\"Invalid shape.\")\n for dim in input_shape:\n if dim == -1:\n new_shape.append(size // other_dim_prod)\n else:\n new_shape.append(dim)\n else:\n new_shape = input_shape\n assert size == np.product(new_shape)\n return new_shape\n\n def _group_index_lists_by_block(\n self, dst_slice_tuples, src_grid: ArrayGrid, dst_index_list, src_index_list\n ):\n # TODO(hme): Keep this function here until it's needed for greater support of\n # selection/assignment operations.\n # Block grid entries needed to write to given dst_slice_selection.\n src_blocks = {}\n dst_slice_np = np.array(dst_slice_tuples).T\n dst_index_arr = np.array(dst_index_list)\n src_index_arr = np.array(src_index_list)\n # Pick the smallest type to represent indices.\n # A set of these indices may be transmitted over the network,\n # so we want to pick the smallest encoding possible.\n index_types = [\n (2 ** 8, np.uint8),\n (2 ** 16, np.uint16),\n (2 ** 32, np.uint32),\n (2 ** 64, np.uint64),\n ]\n index_type = None\n for bound, curr_index_type in index_types:\n if np.all(np.array(src_grid.block_shape) < bound) and np.all(\n dst_slice_np[1] < bound\n ):\n index_type = curr_index_type\n break\n if index_type is None:\n raise Exception(\"Unable to encode block indices, blocks are too large.\")\n for grid_entry in src_grid.get_entry_iterator():\n src_slice_np = np.array(src_grid.get_slice_tuples(grid_entry)).T\n index_pairs = []\n for i in range(src_index_arr.shape[0]):\n src_index = src_index_arr[i]\n dst_index = dst_index_arr[i]\n if np.all(\n (src_slice_np[0] <= src_index) & (src_index < src_slice_np[1])\n ):\n index_pair = (\n (dst_index - dst_slice_np[0]).astype(index_type),\n (src_index - src_slice_np[0]).astype(index_type),\n )\n index_pairs.append(index_pair)\n if len(index_pairs) > 0:\n src_blocks[grid_entry] = index_pairs\n return src_blocks\n\n def _arbitrary_reshape(self, arr: BlockArray, shape, block_shape) -> BlockArray:\n # This is the worst-case scenario.\n # Generate index mappings per block, and group source indices to minimize\n # RPCs and generation of new objects.\n cm = arr.cm\n dst_arr = BlockArray.empty(\n shape=shape, block_shape=block_shape, dtype=arr.dtype, cm=cm\n )\n for dst_grid_entry in dst_arr.grid.get_entry_iterator():\n dst_block: Block = dst_arr.blocks[dst_grid_entry]\n dst_slice_selection = dst_arr.grid.get_slice(dst_grid_entry)\n dst_index_list = array_utils.slice_sel_to_index_list(dst_slice_selection)\n src_index_list = array_utils.translate_index_list(\n dst_index_list, shape, arr.shape\n )\n src_blocks = self._group_index_lists_by_block(\n dst_arr.grid.get_slice_tuples(dst_grid_entry),\n arr.grid,\n dst_index_list,\n src_index_list,\n )\n for src_grid_entry in src_blocks:\n src_block: Block = arr.blocks[src_grid_entry]\n index_pairs = src_blocks[src_grid_entry]\n syskwargs = {\n \"grid_entry\": dst_grid_entry,\n \"grid_shape\": dst_arr.grid.grid_shape,\n }\n dst_block.oid = cm.update_block_by_index(\n dst_block.oid, src_block.oid, index_pairs, syskwargs=syskwargs\n )\n return dst_arr\n\n def _block_shape_reshape(self, arr, block_shape):\n rarr: BlockArray = BlockArray.empty(arr.shape, block_shape, arr.dtype, arr.cm)\n for grid_entry in rarr.grid.get_entry_iterator():\n grid_entry_slice = 
rarr.grid.get_slice(grid_entry)\n # TODO (hme): This could be less costly.\n rarr[grid_entry_slice] = arr[grid_entry_slice]\n return rarr\n\n def _strip_ones(self, shape):\n return tuple(filter(lambda x: x != 1, shape))\n\n def _is_simple_reshape(self, arr: BlockArray, shape, block_shape):\n # Is the reshape a difference of factors of 1?\n # Strip out 1s and compare.\n return self._strip_ones(shape) == self._strip_ones(\n arr.shape\n ) and self._strip_ones(block_shape) == self._strip_ones(arr.block_shape)\n\n def _simple_reshape(self, arr, shape, block_shape):\n # Reshape the array of blocks only.\n # This is only used when the difference in shape are factors of 1s,\n # and the ordering of other factors are maintained.\n\n # Check assumptions.\n assert len(self._strip_ones(arr.shape)) == len(self._strip_ones(shape))\n\n # Create new grid, and perform reshape on blocks\n # to simplify access to source blocks.\n grid = ArrayGrid(shape, block_shape, dtype=arr.dtype.__name__)\n src_blocks = arr.blocks.reshape(grid.grid_shape)\n rarr = BlockArray(grid, arr.cm)\n for grid_entry in grid.get_entry_iterator():\n src_block: Block = src_blocks[grid_entry]\n dst_block: Block = rarr.blocks[grid_entry]\n syskwargs = {\"grid_entry\": grid_entry, \"grid_shape\": grid.grid_shape}\n dst_block.oid = arr.cm.reshape(\n src_block.oid, dst_block.shape, syskwargs=syskwargs\n )\n return rarr\n\n def _validate(self, arr, shape, block_shape):\n assert -1 not in shape\n assert -1 not in block_shape\n assert len(shape) == len(block_shape)\n assert np.product(arr.shape) == np.product(shape)\n\n def __call__(self, arr: BlockArray, shape, block_shape):\n self._validate(arr, shape, block_shape)\n if arr.shape == shape and arr.block_shape == block_shape:\n return arr\n elif self._is_simple_reshape(arr, shape, block_shape):\n return self._simple_reshape(arr, shape, block_shape)\n elif arr.shape == shape and arr.block_shape != block_shape:\n return self._block_shape_reshape(arr, block_shape)\n elif arr.shape != shape and arr.block_shape == block_shape:\n # Just do full reshape for this case as well.\n # Though there may be a better solution, we generally expect\n # the block shape to change with array shape.\n return self._arbitrary_reshape(arr, shape, block_shape)\n else:\n assert arr.shape != shape and arr.block_shape != block_shape\n return self._arbitrary_reshape(arr, shape, block_shape)\n"
] | [
[
"numpy.sum",
"numpy.empty",
"numpy.core.numeric.normalize_axis_tuple",
"numpy.copy",
"numpy.ndarray",
"numpy.empty_like",
"numpy.all",
"numpy.product",
"numpy.array"
]
] |
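BlockArray._tree_reduce above folds a list of per-block results pairwise through a queue until a single object id remains, scheduling the final step on the result block. The same control flow in plain Python, with the distributed cm.bop_reduce call replaced by a local binary op — tree_reduce is this sketch's own name, not part of nums:

def tree_reduce(op, items):
    """Pairwise queue fold, mirroring the while-loop in _tree_reduce."""
    q = list(items)
    if len(q) == 1:
        return q[0]
    while len(q) > 1:
        a = q.pop(0)
        b = q.pop(0)
        q.append(op(a, b))  # reduced pair goes to the back of the queue
    return q.pop(0)

print(tree_reduce(lambda x, y: x + y, [1, 2, 3, 4]))  # 10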
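Reshape.compute_shape in the same file also infers a single -1 dimension from the array's total size. Stripped of the BlockArray context, the inference amounts to the following (infer_shape is an assumed name for illustration):

def infer_shape(size, input_shape):
    """Resolve at most one -1 entry so the product of dims equals size."""
    if list(input_shape).count(-1) > 1:
        raise ValueError("Only one -1 permitted in reshape.")
    if -1 not in input_shape:
        return list(input_shape)
    known = 1
    for dim in input_shape:
        if dim != -1:
            known *= dim  # product of the known dimensions
    if size % known != 0:
        raise ValueError("Invalid shape.")
    return [size // known if dim == -1 else dim for dim in input_shape]

print(infer_shape(12, (3, -1)))  # [3, 4]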
WenqiJiang/FPGA-Accelerator-for-Recommender-Systems | [
"6c3031487cd1447b7f5362483c14b108177387bb"
] | [
"tf_wide_deep_377_table_2048/python/train.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: lapis-hong\n# @Date : 2018/1/15\n\"\"\"Training Wide and Deep Model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport os\nimport shutil\nimport sys\nimport time\n\nimport tensorflow as tf\n\nfrom lib.read_conf import Config\nfrom lib.dataset import input_fn\nfrom lib.build_estimator import build_estimator, build_custom_estimator\nfrom lib.utils.util import elapse_time, list_files\n\nCONFIG = Config().train\nparser = argparse.ArgumentParser(description='Train Wide and Deep Model.')\n\nparser.add_argument(\n '--model_dir', type=str, default=CONFIG[\"model_dir\"],\n help='Base directory for the model.')\nparser.add_argument(\n '--model_type', type=str, default=CONFIG[\"model_type\"],\n help=\"Valid model types: {'wide', 'deep', 'wide_deep'}.\")\nparser.add_argument(\n '--train_epochs', type=int, default=CONFIG[\"train_epochs\"],\n help='Number of training epochs.')\nparser.add_argument(\n '--epochs_per_eval', type=int, default=CONFIG[\"epochs_per_eval\"],\n help='The number of training epochs to run between evaluations.')\nparser.add_argument(\n '--batch_size', type=int, default=CONFIG[\"batch_size\"],\n help='Number of examples per batch.')\nparser.add_argument(\n '--train_data', type=str, default=CONFIG[\"train_data\"],\n help='Path to the train data.')\nparser.add_argument(\n '--eval_data', type=str, default=CONFIG[\"eval_data\"],\n help='Path to the validation data.')\nparser.add_argument(\n '--pred_data', type=str, default=CONFIG[\"pred_data\"],\n help='Path to the validation data.')\nparser.add_argument(\n '--test_data', type=str, default=CONFIG[\"test_data\"],\n help='Path to the test data.')\nparser.add_argument(\n '--image_train_data', type=str, default=CONFIG[\"image_train_data\"],\n help='Path to the train data.')\nparser.add_argument(\n '--image_eval_data', type=str, default=CONFIG[\"image_eval_data\"],\n help='Path to the train data.')\nparser.add_argument(\n '--image_test_data', type=str, default=CONFIG[\"image_test_data\"],\n help='Path to the train data.')\nparser.add_argument(\n '--keep_train', type=int, default=CONFIG[\"keep_train\"],\n help='Whether to keep training on previous trained model.')\n\n\ndef train_and_eval(model):\n for n in range(FLAGS.train_epochs):\n tf.logging.info('=' * 30 + ' START EPOCH {} '.format(n + 1) + '=' * 30 + '\\n')\n train_data_list = list_files(FLAGS.train_data) # dir to file list\n for f in train_data_list:\n t0 = time.time()\n tf.logging.info('<EPOCH {}>: Start training {}'.format(n + 1, f))\n model.train(\n input_fn=lambda: input_fn(f, FLAGS.image_train_data, 'train', FLAGS.batch_size),\n hooks=None,\n steps=None,\n max_steps=None,\n saving_listeners=None)\n tf.logging.info('<EPOCH {}>: Finish training {}, take {} mins'.format(n + 1, f, elapse_time(t0)))\n print('-' * 80)\n tf.logging.info('<EPOCH {}>: Start evaluating {}'.format(n + 1, FLAGS.eval_data))\n t0 = time.time()\n results = model.evaluate(\n input_fn=lambda: input_fn(FLAGS.eval_data, FLAGS.image_eval_data, 'eval', FLAGS.batch_size),\n steps=None, # Number of steps for which to evaluate model.\n hooks=None,\n checkpoint_path=None, # latest checkpoint in model_dir is used.\n name=None)\n tf.logging.info('<EPOCH {}>: Finish evaluation {}, take {} mins'.format(n + 1, FLAGS.eval_data, elapse_time(t0)))\n print('-' * 80)\n # Display evaluation metrics\n for key in sorted(results):\n print('{}: 
{}'.format(key, results[key]))\n # every epochs_per_eval test the model (use larger test dataset)\n if (n+1) % FLAGS.epochs_per_eval == 0:\n tf.logging.info('<EPOCH {}>: Start testing {}'.format(n + 1, FLAGS.test_data))\n results = model.evaluate(\n input_fn=lambda: input_fn(FLAGS.test_data, FLAGS.image_test_data, 'pred', FLAGS.batch_size),\n steps=None, # Number of steps for which to evaluate model.\n hooks=None,\n checkpoint_path=None, # If None, the latest checkpoint in model_dir is used.\n name=None)\n tf.logging.info('<EPOCH {}>: Finish testing {}, take {} mins'.format(n + 1, FLAGS.test_data, elapse_time(t0)))\n print('-' * 80)\n # Display evaluation metrics\n for key in sorted(results):\n print('{}: {}'.format(key, results[key]))\n\n\ndef dynamic_train(model):\n \"\"\"Dynamic train mode.\n For example:\n train_data_files: [0301, 0302, 0303, ...]\n train mode:\n first take 0301 as train data, 0302 as test data;\n then keep training take 0302 as train data, 0303 as test data ...\n \"\"\"\n data_files = list_files(FLAGS.train_data)\n data_files.sort()\n assert len(data_files) > 1, 'Dynamic train mode need more than 1 data file'\n\n for i in range(len(data_files)-1):\n train_data = data_files[i]\n test_data = data_files[i+1]\n tf.logging.info('=' * 30 + ' START TRAINING DATA: {} '.format(train_data) + '=' * 30 + '\\n')\n for n in range(FLAGS.train_epochs):\n t0 = time.time()\n tf.logging.info('START TRAIN DATA <{}> <EPOCH {}>'.format(train_data, n + 1))\n model.train(\n input_fn=lambda: input_fn(train_data, FLAGS.image_train_data, 'train', FLAGS.batch_size),\n hooks=None,\n steps=None,\n max_steps=None,\n saving_listeners=None)\n tf.logging.info('FINISH TRAIN DATA <{}> <EPOCH {}> take {} mins'.format(train_data, n + 1, elapse_time(t0)))\n print('-' * 80)\n tf.logging.info('START EVALUATE TEST DATA <{}> <EPOCH {}>'.format(test_data, n + 1))\n t0 = time.time()\n results = model.evaluate(\n input_fn=lambda: input_fn(test_data, FLAGS.image_eval_data, 'eval', FLAGS.batch_size),\n steps=None, # Number of steps for which to evaluate model.\n hooks=None,\n checkpoint_path=None, # latest checkpoint in model_dir is used.\n name=None)\n tf.logging.info('FINISH EVALUATE TEST DATA <{}> <EPOCH {}>: take {} mins'.format(test_data, n + 1, elapse_time(t0)))\n print('-' * 80)\n # Display evaluation metrics\n for key in sorted(results):\n print('{}: {}'.format(key, results[key]))\n\n\ndef train(model):\n for n in range(FLAGS.train_epochs):\n tf.logging.info('=' * 30 + ' START EPOCH {} '.format(n + 1) + '=' * 30 + '\\n')\n train_data_list = list_files(FLAGS.train_data) # dir to file list\n for f in train_data_list:\n t0 = time.time()\n tf.logging.info('<EPOCH {}>: Start training {}'.format(n + 1, f))\n # run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n model.train(\n input_fn=lambda: input_fn(f, FLAGS.image_train_data, 'train', FLAGS.batch_size),\n hooks=None,\n steps=None,\n max_steps=None,\n saving_listeners=None)\n tf.logging.info('<EPOCH {}>: Finish training {}, take {} mins'.format(n + 1, f, elapse_time(t0)))\n\n\ndef train_and_eval_api(model):\n train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_fn(FLAGS.train_data, FLAGS.image_train_data, FLAGS.batch_size), max_steps=10000)\n eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_fn(FLAGS.eval_data, FLAGS.image_eval_data, FLAGS.batch_size))\n tf.estimator.train_and_evaluate(model, train_spec, eval_spec)\n\n\ndef main(unused_argv):\n CONFIG = Config()\n print(\"Using TensorFlow Version %s\" % tf.__version__)\n # assert 
\"1.4\" <= tf.__version__, \"Need TensorFlow r1.4 or Later.\"\n print('\\nModel Type: {}'.format(FLAGS.model_type))\n model_dir = os.path.join(FLAGS.model_dir, FLAGS.model_type)\n print('\\nModel Directory: {}'.format(model_dir))\n\n print(\"\\nUsing Train Config:\")\n for k, v in CONFIG.train.items():\n print('{}: {}'.format(k, v))\n print(\"\\nUsing Model Config:\")\n for k, v in CONFIG.model.items():\n print('{}: {}'.format(k, v))\n\n if not FLAGS.keep_train:\n # Clean up the model directory if not keep training\n shutil.rmtree(model_dir, ignore_errors=True)\n print('Remove model directory: {}'.format(model_dir))\n # model = build_estimator(model_dir, FLAGS.model_type)\n model = build_custom_estimator(model_dir, FLAGS.model_type)\n tf.logging.info('Build estimator: {}'.format(model))\n\n if CONFIG.train['dynamic_train']:\n train_fn = dynamic_train\n print(\"Using dynamic train mode.\")\n else:\n train_fn = train_and_eval\n\n if CONFIG.distribution[\"is_distribution\"]:\n print(\"Using PID: {}\".format(os.getpid()))\n cluster = CONFIG.distribution[\"cluster\"]\n job_name = CONFIG.distribution[\"job_name\"]\n task_index = CONFIG.distribution[\"task_index\"]\n print(\"Using Distributed TensorFlow. Local host: {} Job_name: {} Task_index: {}\"\n .format(cluster[job_name][task_index], job_name, task_index))\n cluster = tf.train.ClusterSpec(CONFIG.distribution[\"cluster\"])\n server = tf.train.Server(cluster,\n job_name=job_name,\n task_index=task_index)\n # distributed can not including eval.\n train_fn = train\n if job_name == 'ps':\n # wait for incoming connection forever\n server.join()\n # sess = tf.Session(server.target)\n # queue = create_done_queue(task_index, num_workers)\n # for i in range(num_workers):\n # sess.run(queue.dequeue())\n # print(\"ps {} received worker {} done\".format(task_index, i)\n # print(\"ps {} quitting\".format(task_index))\n else: # TODO:supervisor & MonotoredTrainingSession & experiment (deprecated)\n train_fn(model)\n # train_and_eval(model)\n # Each worker only needs to contact the PS task(s) and the local worker task.\n # config = tf.ConfigProto(device_filters=[\n # '/job:ps', '/job:worker/task:%d' % arguments.task_index])\n # with tf.device(tf.train.replica_device_setter(\n # worker_device=\"/job:worker/task:%d\" % task_index,\n # cluster=cluster)):\n # e = _create_experiment_fn()\n # e.train_and_evaluate() # call estimator's train() and evaluate() method\n # hooks = [tf.train.StopAtStepHook(last_step=10000)]\n # with tf.train.MonitoredTrainingSession(\n # master=server.target,\n # is_chief=(task_index == 0),\n # checkpoint_dir=args.model_dir,\n # hooks=hooks) as mon_sess:\n # while not mon_sess.should_stop():\n # # mon_sess.run()\n # classifier.fit(input_fn=train_input_fn, steps=1)\n else:\n # local run\n train_fn(model)\n\n\nif __name__ == '__main__':\n # Set to INFO for tracking training, default is WARN. ERROR for least messages\n tf.logging.set_verbosity(tf.logging.INFO)\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"
] | [
[
"tensorflow.train.Server",
"tensorflow.app.run",
"tensorflow.logging.set_verbosity",
"tensorflow.train.ClusterSpec",
"tensorflow.estimator.train_and_evaluate"
]
] |
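dynamic_train above rolls a training window over a sorted file list — train on file i, evaluate on file i+1 — exactly as its docstring's [0301, 0302, 0303, ...] example describes. The pairing logic on its own (file names extend the docstring's example):

files = sorted(["0301", "0302", "0303", "0304"])
for train_f, test_f in zip(files, files[1:]):
    print("train on {}, evaluate on {}".format(train_f, test_f))
# train on 0301, evaluate on 0302
# train on 0302, evaluate on 0303
# train on 0303, evaluate on 0304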
molokhovdmitry/placeholder | [
"cc0a983af91fcbea3dcd7b9a16db471b000b5ff5"
] | [
"model/create.py"
] | [
"\"\"\"\nMIT License\n\nCopyright (c) 2021 molokhovdmitry\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n\"\"\"This file creates the model (model.h5) and class (classes.txt) files.\"\"\"\n\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\n\nfrom config import (DOWNLOAD_PATH, MODEL_PATH, IMG_SIZE,\n EPOCHS, DROPOUT, VALIDATION_SPLIT,\n BATCH_SIZE, SHUFFLE_BUFFER, PREFETCH_BUFFER,\n VISUALIZE_RESULTS)\n\n\nDATA_PATH = Path.joinpath(Path(DOWNLOAD_PATH), \"frames\")\nMODEL_PATH = Path(MODEL_PATH)\nMODEL_FILE = Path.joinpath(MODEL_PATH, \"model.h5\")\nCLASS_FILE = Path.joinpath(MODEL_PATH, \"classes.txt\")\n\nIMG_HEIGHT = IMG_SIZE[\"height\"]\nIMG_WIDTH = IMG_SIZE[\"width\"]\n\n# Get all classes.\nCLASS_NAMES = [category.name for category in DATA_PATH.iterdir()]\nNUM_CLASSES = len(CLASS_NAMES)\n\n# Save classes in a txt file.\nCLASS_FILE.touch()\nclasses = \"\"\nfor name in CLASS_NAMES:\n classes += str(name) + '\\n'\nCLASS_FILE.write_text(classes)\n\n\n\"\"\"\nGPU support fix.\nhttps://github.com/tensorflow/tensorflow/issues/24828#issuecomment-464910864\n\"\"\"\nconfig = tf.compat.v1.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.compat.v1.Session(config=config)\n\n\ndef create():\n \"\"\"Creates a model.\"\"\"\n # Load the data.\n train_ds, val_ds = load_data(str(DATA_PATH))\n\n # Create and compile the model.\n model = get_model()\n model.summary()\n\n # Fit the model and save the history.\n history = model.fit(train_ds, validation_data=val_ds, epochs=EPOCHS)\n\n # Save the model to a file.\n model.save(str(MODEL_FILE))\n print(\"Model saved.\")\n\n if VISUALIZE_RESULTS:\n # Make loss and accuracy plots with history data.\n make_plots(history, EPOCHS)\n\n\ndef load_data(data_dir):\n \"\"\"Loads the data. 
Returns tuple (`train_ds`, `val_ds`).\"\"\"\n # Training data.\n train_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=VALIDATION_SPLIT,\n subset=\"training\",\n seed=123,\n image_size=(IMG_HEIGHT, IMG_WIDTH),\n batch_size=BATCH_SIZE\n )\n\n # Validation data.\n val_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=VALIDATION_SPLIT,\n subset=\"validation\",\n seed=123,\n image_size=(IMG_HEIGHT, IMG_WIDTH),\n batch_size=BATCH_SIZE\n )\n\n # Configure the dataset for performance.\n train_ds = train_ds.shuffle(SHUFFLE_BUFFER).\\\n prefetch(buffer_size=PREFETCH_BUFFER)\n val_ds = val_ds.prefetch(buffer_size=PREFETCH_BUFFER)\n\n return train_ds, val_ds\n\n\ndef get_model():\n \"\"\"Creates and compiles neural network.\"\"\"\n model = Sequential([\n layers.experimental.preprocessing.\\\n Rescaling(1./255, input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),\n layers.Conv2D(128, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(64, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Flatten(),\n layers.Dense(256, activation='relu'),\n layers.Dropout(DROPOUT),\n layers.Dense(NUM_CLASSES),\n ])\n\n model.compile(\n optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy']\n )\n\n return model\n\n\ndef make_plots(history, epochs):\n \"\"\"Visualizes training results.\"\"\"\n acc = history.history['accuracy']\n val_acc = history.history['val_accuracy']\n\n loss = history.history['loss']\n val_loss = history = history.history['val_loss']\n epochs_range = range(epochs)\n\n plt.figure(figsize=(8, 8))\n\n plt.subplot(1, 2, 1)\n plt.plot(epochs_range, acc, label=\"Training Accuracy\")\n plt.plot(epochs_range, val_acc, label=\"Validation Accuracy\")\n plt.legend(loc=\"lower right\")\n plt.title(\"Training and Validation Accuracy\")\n\n plt.subplot(1, 2, 2)\n plt.plot(epochs_range, loss, label=\"Traing Loss\")\n plt.plot(epochs_range, val_loss, label=\"Validation Loss\")\n plt.legend(loc=\"upper right\")\n plt.title(\"Training and Validation Loss\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n create()\n"
] | [
[
"tensorflow.keras.layers.Flatten",
"tensorflow.compat.v1.Session",
"matplotlib.pyplot.legend",
"tensorflow.keras.layers.Dropout",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.experimental.preprocessing.Rescaling",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"matplotlib.pyplot.plot",
"tensorflow.keras.preprocessing.image_dataset_from_directory"
]
] |
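One detail in make_plots above deserves a note: the line `val_loss = history = history.history['val_loss']` rebinds `history` itself to a plain list mid-function (harmless here only because the other metrics were extracted first), and the training-loss label is misspelled "Traing Loss". A cleaned-up sketch of the same helper — plot_history is an assumed name; it expects the Keras History object that model.fit returns above:

import matplotlib.pyplot as plt

def plot_history(history, epochs):
    """Plot accuracy and loss curves from a Keras History object."""
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']  # no rebinding of `history`
    epochs_range = range(epochs)

    plt.figure(figsize=(8, 8))

    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, acc, label="Training Accuracy")
    plt.plot(epochs_range, val_acc, label="Validation Accuracy")
    plt.legend(loc="lower right")
    plt.title("Training and Validation Accuracy")

    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, loss, label="Training Loss")
    plt.plot(epochs_range, val_loss, label="Validation Loss")
    plt.legend(loc="upper right")
    plt.title("Training and Validation Loss")
    plt.show()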
kalosisz/tensorflow | [
"b7ecd75b24f577b73500024fe91d2ea0c806d05a"
] | [
"tensorflow/python/framework/extension_type_test.py"
] | [
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.framework.extension_type.\"\"\"\n\nimport contextlib\nimport tempfile\nimport typing\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import extension_type\nfrom tensorflow.python.framework import extension_type_field\nfrom tensorflow.python.framework import immutable_dict\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.framework import type_spec\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import load\nfrom tensorflow.python.saved_model import save\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_inspect\n\n\nclass MaskedTensorV1(extension_type.ExtensionType):\n \"\"\"Example subclass of ExtensionType, used for testing.\"\"\"\n values: ops.Tensor\n mask: tensor_spec.TensorSpec(shape=None, dtype=dtypes.bool)\n\n\nclass MaskedTensorV2(extension_type.ExtensionType):\n \"\"\"Example subclass of ExtensionType, used for testing.\n\n This version adds methods, classmethod, staticmethod, and properties, and\n customizes `__repr__` and `__validate__`. 
It also adds a `__name__` field,\n which enables serialization.\n \"\"\"\n __name__ = 'tf.test.MaskedTensorV2'\n\n values: ops.Tensor\n mask: tensor_spec.TensorSpec(shape=None, dtype=dtypes.bool)\n\n def __repr__(self):\n if hasattr(self.values, 'numpy') and hasattr(self.mask, 'numpy'):\n return '<MaskedTensorV2 %s>' % _masked_array_repr(self.values.numpy(),\n self.mask.numpy())\n else:\n return super(MaskedTensorV2, self).__repr__()\n\n @property\n def shape(self):\n return self.values.shape\n\n @property\n def dtype(self):\n return self.values.dtype\n\n @classmethod\n def from_full_tensor(cls, values):\n return cls(values, array_ops.ones_like(values, dtype=dtypes.bool))\n\n # A dummy example to test support of staticmethod\n @staticmethod\n def doc_link():\n return 'http://example.com/masked_tensor'\n\n def __validate__(self):\n self.values.shape.assert_is_compatible_with(self.mask.shape)\n\n def with_default(self, default):\n return array_ops.where_v2(self.mask, self.values, default)\n\n __add__ = math_ops.add\n __sub__ = math_ops.subtract\n\n\ndef _masked_array_repr(values, mask):\n \"\"\"Returns a string representation for a masked numpy array.\"\"\"\n assert len(values) == len(mask)\n if len(values.shape) == 1:\n items = [repr(v) if m else '_' for (v, m) in zip(values, mask)]\n else:\n items = [_masked_array_repr(v, m) for (v, m) in zip(values, mask)]\n return '[%s]' % ', '.join(items)\n\n\nclass ForwardRefA(extension_type.ExtensionType):\n x: typing.Tuple[typing.Union['ForwardRefA', 'ForwardRefB'], ...]\n y: 'ForwardRefB'\n\n\nclass ForwardRefB(extension_type.ExtensionType):\n z: 'ForwardRefB'\n n: ops.Tensor\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ExtensionTypeTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n def testAttributeAccessors(self):\n mt1 = MaskedTensorV2([1, 2, 3, 4], [True, True, False, True])\n mt2 = extension_type.pack(mt1)\n\n for mt in [mt1, mt2]:\n self.assertIsInstance(mt.values, ops.Tensor)\n self.assertAllEqual(mt.values, [1, 2, 3, 4])\n self.assertIsInstance(mt.mask, ops.Tensor)\n self.assertAllEqual(mt.mask, [True, True, False, True])\n\n def testAttributesAreImmutable(self):\n mt1 = MaskedTensorV2([1, 2, 3, 4], [True, True, False, True])\n mt2 = extension_type.pack(mt1)\n\n for mt in [mt1, mt2]:\n with self.assertRaisesRegex(\n AttributeError,\n 'Cannot mutate attribute `score` outside the custom constructor of ExtensionType'\n ):\n mt.score = 12\n with self.assertRaisesRegex(\n AttributeError,\n 'Cannot mutate attribute `values` outside the custom constructor of ExtensionType'\n ):\n mt.values = constant_op.constant([4, 3, 2, 1])\n with self.assertRaisesRegex(\n AttributeError,\n 'Cannot mutate attribute `values` outside the custom constructor of ExtensionType'\n ):\n del mt.values\n\n def testClassAndStaticMethod(self):\n mt = MaskedTensorV2.from_full_tensor([1, 2, 3, 4])\n self.assertAllEqual(mt.mask, [True, True, True, True])\n self.assertEqual(mt.doc_link(), 'http://example.com/masked_tensor')\n\n def testRepr(self):\n values = constant_op.constant([1, 2, 3, 4])\n mask = constant_op.constant([True, True, False, True])\n mt = MaskedTensorV1(values, mask)\n expected = f'MaskedTensorV1(values={values!r}, mask={mask!r})'\n self.assertEqual(expected, repr(mt))\n\n def testEagerRepr(self):\n values = constant_op.constant([1, 2, 3, 4])\n mask = constant_op.constant([True, True, False, True])\n mt = MaskedTensorV2(values, mask)\n if context.executing_eagerly():\n expected = '<MaskedTensorV2 [1, 2, _, 4]>'\n else:\n expected = 
f'MaskedTensorV2(values={values!r}, mask={mask!r})'\n\n self.assertEqual(expected, repr(mt))\n self.assertEqual(expected, repr(mt))\n\n def testConstructorSignature(self):\n\n class MyType(extension_type.ExtensionType):\n x: ops.Tensor\n y: tensor_spec.TensorSpec(shape=None, dtype=dtypes.bool)\n z: typing.Tuple[typing.Union[int, str], ...] = [1, 'two', 3]\n\n expected_parameters = [\n tf_inspect.Parameter('self',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD),\n tf_inspect.Parameter(\n 'x',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation=ops.Tensor),\n tf_inspect.Parameter(\n 'y',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation=tensor_spec.TensorSpec(shape=None, dtype=dtypes.bool)),\n tf_inspect.Parameter(\n 'z',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation=typing.Tuple[typing.Union[int, str], ...],\n default=(1, 'two', 3)),\n ]\n expected_sig = tf_inspect.Signature(\n expected_parameters, return_annotation=MyType)\n self.assertEqual(expected_sig, tf_inspect.signature(MyType.__init__))\n\n def testEmptyType(self):\n\n class EmptyType(extension_type.ExtensionType):\n pass\n\n self.assertEmpty(EmptyType._tf_extension_type_fields())\n x = EmptyType()\n self.assertEqual(repr(x), 'EmptyType()')\n\n def testCustomConstrutor(self):\n\n class SummarizedTensor(extension_type.ExtensionType):\n values: ops.Tensor\n mean: ops.Tensor\n max: ops.Tensor\n\n def __init__(self, values):\n self.values = ops.convert_to_tensor(values)\n self.mean = math_ops.reduce_mean(values)\n self.max = math_ops.reduce_max(values)\n\n x = SummarizedTensor([[1.0, 2, 3], [4, 5, 6]])\n self.assertAllEqual(x.values, [[1.0, 2, 3], [4, 5, 6]])\n self.assertAllEqual(x.mean, 3.5)\n self.assertAllEqual(x.max, 6)\n\n class Node(extension_type.ExtensionType):\n x: ops.Tensor\n y: typing.Optional[str] = None\n children: typing.Tuple['ExtensionTypeTest.Node', ...] 
= ()\n\n def testCustomConstructorWithDefaultValues(self):\n a = ExtensionTypeTest.Node(5)\n self.assertAllEqual(a.x, 5)\n self.assertIsNone(a.y)\n self.assertEqual(a.children, ())\n\n b = ExtensionTypeTest.Node(6, 'blue')\n self.assertAllEqual(b.x, 6)\n self.assertEqual(b.y, 'blue')\n self.assertEqual(b.children, ())\n\n c = ExtensionTypeTest.Node(7, children=(a, b))\n self.assertAllEqual(c.x, 7)\n self.assertIsNone(c.y)\n self.assertEqual(c.children, (a, b))\n\n def testCustomConstructorNondefaultCanotFollowDefault(self):\n with self.assertRaisesRegex(\n ValueError, \"Field without default 'd' follows field with default 'c'\"):\n\n class MyType(extension_type.ExtensionType):\n a: int\n b: str = 'Hello world'\n c: typing.Optional[ops.Tensor] = None\n d: ops.Tensor\n\n del MyType\n\n def testCustomConstrutorCantMutateNestedValues(self):\n\n class Foo(extension_type.ExtensionType):\n x: int\n\n class Bar(extension_type.ExtensionType):\n foo: Foo\n\n def __init__(self, foo):\n foo.x = 33 # This raises an exception\n\n with self.assertRaisesRegex(\n AttributeError,\n 'Cannot mutate attribute `x` outside the custom constructor of ExtensionType'\n ):\n Bar(Foo(12))\n\n def testCustomValidate(self):\n\n class AlignedTensors(extension_type.ExtensionType):\n x: ops.Tensor\n y: ops.Tensor\n\n def __validate__(self):\n self.x.shape.assert_is_compatible_with(self.y.shape)\n\n aligned = AlignedTensors([1, 2, 3], ['a', 'b', 'c'])\n self.assertAllEqual(aligned.x, [1, 2, 3])\n self.assertAllEqual(aligned.y, [b'a', b'b', b'c'])\n\n with self.assertRaises(ValueError):\n AlignedTensors([1, 2, 3], ['a', 'b', 'c', 'd'])\n\n def testEquals(self):\n\n class MyType(extension_type.ExtensionType):\n values: ops.Tensor\n score: ops.Tensor\n flavor: str\n\n x1 = MyType([1, 2], 8, 'blue')\n x2 = MyType([1, 2], 8, 'blue')\n y = MyType([1, 2], 8, 'red')\n z = MyType([1, 2], 7, 'blue')\n self.assertAllEqual(x1 == x2, True)\n self.assertAllEqual(x1 != x2, False)\n self.assertAllEqual(x1 == y, False)\n self.assertAllEqual(x1 != y, True)\n self.assertAllEqual(x1 == z, False)\n self.assertAllEqual(y == z, False)\n\n # These are not equal, even though their values are broadcast-compatible\n # and elements are all equal when we broadcast. 
Shapes must match.\n a = MyType([1, 1, 1, 1], 0, 'x')\n b = MyType([[1, 1, 1, 1]], 0, 'x')\n c = MyType([[1, 1], [1, 1]], 0, 'x')\n self.assertAllEqual(a == b, False)\n self.assertAllEqual(a == c, False)\n self.assertAllEqual(b == c, False)\n\n # Test with unknown shapes (executes a different codepath).\n a_ph = replace_tensors_with_placeholders(a)\n b_ph = replace_tensors_with_placeholders(b)\n c_ph = replace_tensors_with_placeholders(c)\n self.assertAllEqual(a_ph == b_ph, False)\n self.assertAllEqual(a_ph == c_ph, False)\n self.assertAllEqual(b_ph == c_ph, False)\n\n def testPassIntoTfFunction(self):\n\n @def_function.function\n def fn(x):\n return x.with_default(99)\n\n mt = MaskedTensorV2([1, 2, 3, 4], [True, True, False, True])\n self.assertAllEqual([1, 2, 99, 4], fn(mt))\n self.assertAllEqual([1, 2, 99, 4], fn(extension_type.pack(mt)))\n\n def testReturnFromTfFunction(self):\n\n @def_function.function\n def mask_neg_values(x):\n return MaskedTensorV2(x, x > 0)\n\n @def_function.function\n def mask_neg_values_packed(x):\n return extension_type.pack(MaskedTensorV2(x, x > 0))\n\n expected = MaskedTensorV2([5, 8, -3, 9], [True, True, False, True])\n\n actual1 = mask_neg_values(constant_op.constant([5, 8, -3, 9]))\n self.assertIsInstance(actual1, MaskedTensorV2)\n self.assertAllEqual(expected.values, actual1.values)\n self.assertAllEqual(expected.mask, actual1.mask)\n\n actual2 = mask_neg_values_packed(constant_op.constant([5, 8, -3, 9]))\n self.assertIsInstance(actual2, MaskedTensorV2)\n self.assertTrue(extension_type.is_packed(actual2))\n self.assertAllEqual(expected.values, actual2.values)\n self.assertAllEqual(expected.mask, actual2.mask)\n\n def testCaptureByTfFunction(self):\n x = MaskedTensorV2(\n values=[[1, 2, 3], [4, 5, 6]],\n mask=[[True, True, True], [True, False, True]])\n\n @def_function.function\n def add_to_x(y):\n return MaskedTensorV2(x.values + y.values, x.mask & y.mask)\n\n actual = add_to_x(MaskedTensorV2([10, 20, 30], [False, True, True]))\n expected = MaskedTensorV2(\n values=[[11, 22, 33], [14, 25, 36]],\n mask=[[False, True, True], [False, False, True]])\n self.assertIsInstance(actual, MaskedTensorV2)\n self.assertAllEqual(expected.values, actual.values)\n self.assertAllEqual(expected.mask, actual.mask)\n\n def testTfFunctionArgMutationError(self):\n\n @def_function.function\n def fn_with_side_effect(mts):\n mts.append(MaskedTensorV1(mts[0].values * 2, mts[0].mask))\n\n with self.assertRaisesRegex(ValueError, 'should not modify'):\n fn_with_side_effect([MaskedTensorV1([10, 20, 30], [False, True, True])])\n\n def testNestPackUnpack(self):\n\n class CandyStore(extension_type.ExtensionType):\n name: ops.Tensor\n prices: typing.Mapping[str, ops.Tensor]\n\n store = CandyStore('Yum', {'gum': [0.42, 0.48], 'chocolate': [0.83, 1.02]})\n components = nest.flatten(store, expand_composites=True)\n repacked_1 = nest.pack_sequence_as(\n store, components, expand_composites=True)\n repacked_2 = nest.pack_sequence_as(\n store._type_spec, components, expand_composites=True)\n\n # Note: dicts get sorted by key.\n self.assertLen(components, 3)\n self.assertAllEqual(components[0], b'Yum')\n self.assertAllClose(components[1], [0.83, 1.02])\n self.assertAllClose(components[2], [0.42, 0.48])\n\n for repacked in [repacked_1, repacked_2]:\n self.assertAllEqual(repacked.name, b'Yum')\n self.assertAllClose(repacked.prices['gum'], [0.42, 0.48])\n self.assertAllClose(repacked.prices['chocolate'], [0.83, 1.02])\n\n def testSimpleCond(self):\n x = MaskedTensorV1([1, 2, 3, 4], [True, False, 
True, False])\n y = MaskedTensorV1([5, 6, 7, 8], [False, True, True, False])\n\n x_2 = control_flow_ops.cond(\n constant_op.constant(True), lambda: x, lambda: y)\n y_2 = control_flow_ops.cond(\n constant_op.constant(False), lambda: x, lambda: y)\n\n self.assertAllEqual(x.values, x_2.values)\n self.assertAllEqual(x.mask, x_2.mask)\n self.assertAllEqual(y.values, y_2.values)\n self.assertAllEqual(y.mask, y_2.mask)\n\n def testComplexCond(self):\n mt = MaskedTensorV1([1, 2, 3, 4], [True, False, True, False])\n\n def true_fn():\n return MaskedTensorV1(\n array_ops.where_v2(mt.mask, mt.values, -1), mt.values > 3)\n\n def false_fn():\n return MaskedTensorV1(\n array_ops.where_v2(mt.mask, 100, mt.values * 2),\n math_ops.logical_not(mt.mask))\n\n x = control_flow_ops.cond(constant_op.constant(True), true_fn, false_fn)\n y = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)\n\n self.assertAllEqual(x.values, [1, -1, 3, -1])\n self.assertAllEqual(x.mask, [False, False, False, True])\n self.assertAllEqual(y.values, [100, 4, 100, 8])\n self.assertAllEqual(y.mask, [False, True, False, True])\n\n def testCondAutograph(self):\n\n @def_function.function\n def fn(mt):\n if mt.values[3] > 3:\n return MaskedTensorV1(\n array_ops.where_v2(mt.mask, mt.values, -1), mt.values > 3)\n else:\n return MaskedTensorV1(\n array_ops.where_v2(mt.mask, 100, mt.values * 2), not mt.mask)\n\n x = fn(MaskedTensorV1([1, 2, 3, 4], [True, False, True, False]))\n self.assertAllEqual(x.values, [1, -1, 3, -1])\n self.assertAllEqual(x.mask, [False, False, False, True])\n\n def testCondTypeMismatch(self):\n if context.executing_eagerly():\n # In eager mode, tf.cond eagerly runs either true_fn or false_fn, and\n # ignores the other one; so it doesn't detect any type mismatches\n # between the two outcomes. 
(See _eager_cond_implementation in\n # control_flow_ops.py.)\n return\n\n a = lambda: MaskedTensorV1([1, 2, 3], [True, True, False])\n b = lambda: MaskedTensorV1(['a', 'b', 'c'], [False, True, True])\n c = lambda: MaskedTensorV2([4, 5, 6], [True, True, False])\n d = lambda: constant_op.constant([7, 8, 9])\n\n with self.assertRaisesRegex(\n ValueError,\n 'Incompatible return values of true_fn and false_fn: The two '\n \"structures don't have the same nested structure\"):\n control_flow_ops.cond(constant_op.constant(True), a, b)\n with self.assertRaisesRegex(\n TypeError, 'Incompatible return types of true_fn and false_fn: The two '\n \"structures don't have the same nested structure\"):\n control_flow_ops.cond(constant_op.constant(True), a, c)\n with self.assertRaisesRegex(\n ValueError,\n 'Incompatible return values of true_fn and false_fn: The two '\n \"structures don't have the same nested structure\"):\n control_flow_ops.cond(constant_op.constant(True), a, d)\n\n def testCondPacked(self):\n x = MaskedTensorV2([1, 2, 3, 4], [True, False, True, False])\n y = MaskedTensorV2([5, 6, 7, 8], [False, True, True, False])\n x = extension_type.pack(x)\n y = extension_type.pack(y)\n\n x_2 = control_flow_ops.cond(\n constant_op.constant(True), lambda: x, lambda: y)\n y_2 = control_flow_ops.cond(\n constant_op.constant(False), lambda: x, lambda: y)\n\n self.assertAllEqual(x.values, x_2.values)\n self.assertAllEqual(x.mask, x_2.mask)\n self.assertAllEqual(y.values, y_2.values)\n self.assertAllEqual(y.mask, y_2.mask)\n\n a = MaskedTensorV2([1, 2, 3, 4], [True, False, True, False])\n b = extension_type.pack(a)\n b = control_flow_ops.cond(\n constant_op.constant(True), lambda: array_ops.size(a.mask),\n lambda: array_ops.size(a.values))\n self.assertAllEqual(b, 4)\n\n # Note: the following example would fail (with `Retval[0] does not have a\n # value`) if `ExtensionType.__getattr__` cached the results of unpacking\n # the value. 
See the comment in `ExtensionType.__getattr__` for details.\n c = MaskedTensorV2([1, 2, 3, 4], [True, False, True, False])\n c = extension_type.pack(c)\n d = control_flow_ops.cond(\n constant_op.constant(False), lambda: array_ops.size(c.mask),\n lambda: array_ops.size(c.values))\n self.assertAllEqual(d, 4)\n\n def testWhileLoop(self):\n x = MaskedTensorV1([1, 2, 3, 4], [True, False, True, False])\n\n cond = lambda i, x: i < 10\n body = lambda i, x: (i + 1, MaskedTensorV1(x.values * 2, x.mask))\n _, y = control_flow_ops.while_loop_v2(cond, body, [0, x])\n\n self.assertIsInstance(y, MaskedTensorV1)\n self.assertAllEqual(y.values, [1024, 2048, 3072, 4096])\n self.assertAllEqual(y.mask, [True, False, True, False])\n\n def testWhileLoopAutograph(self):\n\n @def_function.function\n def fn(x, n):\n for _ in math_ops.range(n):\n x = MaskedTensorV1(x.values * 2, x.mask)\n return x\n\n y = fn(MaskedTensorV1([1, 2, 3, 4], [True, False, True, False]), 10)\n self.assertIsInstance(y, MaskedTensorV1)\n self.assertAllEqual(y.values, [1024, 2048, 3072, 4096])\n self.assertAllEqual(y.mask, [True, False, True, False])\n\n def testWhileLoopTypeMismatch(self):\n x = MaskedTensorV1([1, 2, 3, 4], [True, False, True, False])\n\n cond = lambda i, x: i < 10\n\n def body(i, x):\n if isinstance(x, MaskedTensorV1):\n return x.values * 2\n else:\n return MaskedTensorV1(x, x > i)\n\n with self.assertRaisesRegex(\n ValueError, \"The two structures don't have the same nested structure\"):\n control_flow_ops.while_loop_v2(cond, body, [0, x])\n\n def testWhileLoopPacked(self):\n x = MaskedTensorV2([1, 2, 3, 4], [True, False, True, False])\n x = extension_type.pack(x)\n cond = lambda i, x: i < 10\n\n def body(i, x):\n return i + 1, extension_type.pack(MaskedTensorV2(x.values * 2, x.mask))\n\n _, y = control_flow_ops.while_loop_v2(cond, body, [0, x])\n self.assertIsInstance(y, MaskedTensorV2)\n self.assertAllEqual(y.values, [1024, 2048, 3072, 4096])\n self.assertAllEqual(y.mask, [True, False, True, False])\n\n def testNestedFields(self):\n PossiblyRaggedTensor = typing.Union[ops.Tensor, ragged_tensor.RaggedTensor]\n ToyFeatures = typing.Mapping[str, PossiblyRaggedTensor]\n\n class ToyInfo(extension_type.ExtensionType):\n version: str\n toys: typing.Tuple[typing.Tuple[str, ops.Tensor, ToyFeatures], ...]\n boxes: typing.Mapping[str, ops.Tensor]\n\n authors = [[b'A', b'Aardvark'], [b'Z', b'Zhook']]\n toys = [('car', 1.0, {\n 'size': [8, 3, 2],\n 'color': [0.3, 0.2, 0.8]\n }), ('book', 3.7, {\n 'authors': ragged_factory_ops.constant(authors)\n })]\n boxes = {'green': ['car'], 'blue': ['car', 'book', 'book']}\n toy_info = ToyInfo(version='1.0 alpha', toys=toys, boxes=boxes)\n\n self.assertEqual(toy_info.version, '1.0 alpha')\n self.assertEqual(toy_info.toys[0][0], 'car')\n self.assertIsInstance(toy_info.toys[0][1], ops.Tensor)\n self.assertAllEqual(toy_info.toys[0][1], 1.0)\n self.assertEqual(set(toy_info.toys[0][2].keys()), {'size', 'color'})\n self.assertIsInstance(toy_info.toys[0][2]['size'], ops.Tensor)\n self.assertAllEqual(toy_info.toys[0][2]['size'], [8, 3, 2])\n self.assertIsInstance(toy_info.toys[1][2]['authors'],\n ragged_tensor.RaggedTensor)\n self.assertAllEqual(toy_info.toys[1][2]['authors'], authors)\n self.assertAllEqual(toy_info.boxes['green'], [b'car'])\n self.assertAllEqual(toy_info.boxes['blue'], ['car', 'book', 'book'])\n\n expected_repr = (\n r\"ToyInfo\\(version='1.0 alpha', toys=\\(\"\n r\"\\('car', <tf.Tensor[^>]*>, ImmutableDict\\(\"\n r\"{'size': <tf.Tensor[^>]*>, 'color': <tf.Tensor[^>]*>}\\)\\), \"\n 
r\"\\('book', <tf.Tensor[^>]*>, ImmutableDict\\(\"\n r\"{'authors': (<tf.RaggedTensor[^>]*>|tf.RaggedTensor\\(.*\\))}\\)\\)\\), \"\n r'boxes=ImmutableDict\\('\n r\"{'green': <tf.Tensor[^>]*>, 'blue': <tf.Tensor[^>]*>}\\)\\)\")\n\n self.assertRegex(repr(toy_info), expected_repr)\n\n def testNestedExtensionTypes(self):\n PossiblyMaskedTensor = typing.Union[ops.Tensor, MaskedTensorV1]\n\n class Toy(extension_type.ExtensionType):\n name: str\n price: ops.Tensor\n features: typing.Mapping[str, PossiblyMaskedTensor]\n\n class Box(extension_type.ExtensionType):\n contents: ops.Tensor\n\n class ToyInfo(extension_type.ExtensionType):\n version: str\n toys: typing.Tuple[Toy, ...]\n boxes: typing.Mapping[str, Box]\n\n authors = MaskedTensorV1(\n values=[[b'A', b'Quincy', b'Aardvark'], [b'Z', b'Zhook', b'']],\n mask=[[True, True, True], [True, True, False]])\n toys = [\n Toy('car', 1.0, {\n 'size': [8, 3, 2],\n 'color': [0.3, 0.2, 0.8]\n }),\n Toy(name='book', price=3.7, features={'authors': authors})\n ]\n boxes = {\n 'green': Box(['car']),\n 'blue': Box(contents=['car', 'book', 'book'])\n }\n toy_info = ToyInfo(version='1.0 alpha', toys=toys, boxes=boxes)\n\n @def_function.function\n def fn(info):\n prices = [toy.price for toy in info.toys]\n return math_ops.reduce_sum(array_ops.stack(prices))\n\n self.assertAllClose(fn(toy_info), 4.7)\n\n def testNestedCustomConstructor(self):\n\n class Toy(extension_type.ExtensionType):\n name: str\n price: ops.Tensor\n\n def __init__(self, name, price, discount=0):\n if discount:\n name += ' (discounted)'\n price *= (1 - discount)\n self.name = name\n self.price = price\n\n class ToyBox(extension_type.ExtensionType):\n toys: typing.Tuple[Toy, ...]\n\n def __init__(self, name_to_price, name_to_discount):\n self.toys = [\n Toy(name, price, name_to_discount.get(name, 0))\n for (name, price) in name_to_price.items()\n ]\n\n toy_box = ToyBox({\n 'car': 8.3,\n 'truck': 5.9,\n 'puzzle': 5.3,\n 'jacks': 2.8\n }, {\n 'puzzle': .2,\n 'truck': .3\n })\n self.assertLen(toy_box.toys, 4)\n self.assertEqual(\n set(toy.name for toy in toy_box.toys),\n {'car', 'truck (discounted)', 'puzzle (discounted)', 'jacks'})\n\n def testExtensionTypeWithMathOperators(self):\n\n def masked_add(x, y, name=None):\n del name\n if not isinstance(x, MaskedTensorV2) and isinstance(y, MaskedTensorV2):\n return dispatch.OpDispatcher.NOT_SUPPORTED\n return MaskedTensorV2(x.values + y.values, x.mask & y.mask)\n\n with temporarily_add_dispatch(math_ops.add, MaskedTensorV2, masked_add):\n x = MaskedTensorV2([[1, 2], [3, 4]], [[True, False], [True, True]])\n y = MaskedTensorV2([[3, 4], [5, 6]], [[True, True], [False, True]])\n z = x + y\n self.assertAllEqual(z.values, [[4, 6], [8, 10]])\n self.assertAllEqual(z.mask, [[True, False], [False, True]])\n\n def testGetExtensionTypeFields(self):\n\n # Can be called on a type or an instance:\n fields_1 = MaskedTensorV1._tf_extension_type_fields()\n fields_2 = MaskedTensorV1([0], [True])._tf_extension_type_fields()\n\n for fields in [fields_1, fields_2]:\n self.assertLen(fields, 2)\n self.assertEqual(fields[0].name, 'values')\n self.assertEqual(fields[0].value_type, ops.Tensor)\n self.assertEqual(fields[0].default, fields[0].NO_DEFAULT)\n self.assertEqual(fields[1].name, 'mask')\n self.assertEqual(fields[1].value_type,\n tensor_spec.TensorSpec(shape=None, dtype=dtypes.bool))\n self.assertEqual(fields[1].default, fields[0].NO_DEFAULT)\n\n def testHasExtensionTypeField(self):\n\n self.assertTrue(MaskedTensorV1._tf_extension_type_has_field('values'))\n 
self.assertTrue(MaskedTensorV1._tf_extension_type_has_field('mask'))\n self.assertFalse(MaskedTensorV1._tf_extension_type_has_field('labels'))\n\n mt = MaskedTensorV1([0], [True])\n self.assertTrue(mt._tf_extension_type_has_field('values'))\n self.assertTrue(mt._tf_extension_type_has_field('mask'))\n self.assertFalse(mt._tf_extension_type_has_field('labels'))\n\n def testForwardReferences(self):\n A, B = ForwardRefA, ForwardRefB\n\n self.assertEqual(A._tf_extension_type_fields(),\n (extension_type_field.ExtensionTypeField(\n 'x', typing.Tuple[typing.Union[A, B], ...]),\n extension_type_field.ExtensionTypeField('y', B)))\n self.assertEqual(B._tf_extension_type_fields(),\n (extension_type_field.ExtensionTypeField('z', B),\n extension_type_field.ExtensionTypeField('n', ops.Tensor)))\n\n # Check the signature.\n expected_parameters = [\n tf_inspect.Parameter('self',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD),\n tf_inspect.Parameter(\n 'x',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation=typing.Tuple[typing.Union['ForwardRefA', 'ForwardRefB'],\n ...]),\n tf_inspect.Parameter(\n 'y',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation='ForwardRefB'),\n ]\n expected_sig = tf_inspect.Signature(\n expected_parameters, return_annotation=A)\n self.assertEqual(tf_inspect.signature(A.__init__), expected_sig)\n\n def testUnresolvedForwardReference(self):\n\n class Broken(extension_type.ExtensionType):\n x: 'Cra' # note: intentional typo for Car.\n\n class Car(extension_type.ExtensionType):\n speed: float\n\n with self.assertRaises(TypeError):\n Broken(x=Car(3.8))\n\n def testUnsupportedAnnotations(self):\n with self.assertRaisesRegex(\n TypeError, \"In field 'values': Unsupported type annotation\"):\n\n class MyType1(extension_type.ExtensionType): # pylint: disable=unused-variable\n values: typing.List[ops.Tensor]\n\n with self.assertRaisesRegex(TypeError,\n \"In field 'xyz': Unsupported type annotation\"):\n\n class MyType2(extension_type.ExtensionType): # pylint: disable=unused-variable\n xyz: typing.Union[typing.Tuple[complex, ...], int]\n\n def testExtensionTypeBaseClassHasNoSpec(self):\n self.assertFalse(hasattr(extension_type.ExtensionType, 'Spec'))\n\n def testExtensionTypeBaseConstructorRaisesException(self):\n with self.assertRaisesRegex(AssertionError,\n 'ExtensionType is an abstract base class.'):\n extension_type.ExtensionType()\n\n class ExtensionTypeWithName(extension_type.ExtensionType):\n __name__ = 'tf.__test__.ExtensionTypeWithName' # For SavedModel\n x: typing.Tuple[ops.Tensor, int]\n y: ops.Tensor\n\n def testSavedModelSupport(self):\n\n class TestModule(module.Module):\n\n @def_function.function\n def f(self, s):\n return s.x[0] + s.x[1] + s.y\n\n s1 = self.ExtensionTypeWithName((1, 2), 3)\n s2 = self.ExtensionTypeWithName((1.0, 2), [3.0, 4.0])\n\n m = TestModule()\n m.f.get_concrete_function(s1)\n m.f.get_concrete_function(s2)\n\n path = tempfile.mkdtemp(prefix=test.get_temp_dir())\n save.save(m, path)\n loaded = load.load(path)\n\n self.assertAllEqual(loaded.f(s1), 6)\n self.assertAllEqual(loaded.f(s2), [6.0, 7.0])\n\n def testPackedEncoding(self):\n mt1 = MaskedTensorV2([1, 2, 3, 4], [True, True, False, True])\n self.assertLen(nest.flatten(mt1, expand_composites=True), 2)\n\n mt2 = extension_type.pack(mt1)\n self.assertLen(nest.flatten(mt2, expand_composites=True), 1)\n self.assertIsInstance(mt2.values, ops.Tensor)\n self.assertAllEqual(mt2.values, [1, 2, 3, 4])\n self.assertIsInstance(mt2.mask, ops.Tensor)\n self.assertAllEqual(mt2.mask, [True, True, False, 
True])\n\n mt3 = extension_type.unpack(mt2)\n self.assertLen(nest.flatten(mt3, expand_composites=True), 2)\n self.assertIsInstance(mt3.values, ops.Tensor)\n self.assertAllEqual(mt3.values, [1, 2, 3, 4])\n self.assertIsInstance(mt3.mask, ops.Tensor)\n self.assertAllEqual(mt3.mask, [True, True, False, True])\n\n nest.assert_same_structure(mt1, mt3, expand_composites=True)\n with self.assertRaisesRegex(ValueError, \"don't have the same\"): # pylint: disable=g-error-prone-assert-raises\n nest.assert_same_structure(mt1, mt2, expand_composites=True)\n\n mt4 = MaskedTensorV1([1, 2, 3, 4], [True, True, False, True])\n with self.assertRaisesRegex(\n ValueError,\n 'ExtensionTypes must have a __name__ field in order to be packed.'):\n extension_type.pack(mt4)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ExtensionTypeSpecTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def testSpecConstructor(self):\n values_spec = tensor_spec.TensorSpec([4], dtypes.float32)\n mask_spec = tensor_spec.TensorSpec([4], dtypes.bool)\n mt_spec = MaskedTensorV1.Spec(values_spec, mask_spec)\n self.assertEqual(mt_spec.values, values_spec)\n self.assertEqual(mt_spec.mask, mask_spec)\n\n mt = MaskedTensorV1([1.0, 2.0, 3.0, 4.0], [True, True, False, True])\n self.assertEqual(mt._type_spec, mt_spec)\n\n def testSpecConstructorSignature(self):\n\n class MyType(extension_type.ExtensionType):\n x: ops.Tensor\n y: tensor_spec.TensorSpec(shape=None, dtype=dtypes.bool)\n z: typing.Tuple[typing.Union[int, str], ...] = [1, 'two', 3]\n\n expected_parameters = [\n tf_inspect.Parameter('self',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD),\n tf_inspect.Parameter('x', tf_inspect.Parameter.POSITIONAL_OR_KEYWORD),\n tf_inspect.Parameter('y', tf_inspect.Parameter.POSITIONAL_OR_KEYWORD),\n tf_inspect.Parameter('z', tf_inspect.Parameter.POSITIONAL_OR_KEYWORD),\n ]\n expected_sig = tf_inspect.Signature(\n expected_parameters, return_annotation=MyType.Spec)\n self.assertEqual(expected_sig, tf_inspect.signature(MyType.Spec.__init__))\n\n def testSpecAttributesAreImmutable(self):\n mt = MaskedTensorV1([1, 2, 3, 4], [True, True, False, True])\n mt_spec = MaskedTensorV1.Spec.from_value(mt)\n with self.assertRaisesRegex(\n AttributeError, 'Cannot mutate attribute `score` '\n 'outside the custom constructor of ExtensionTypeSpec'):\n mt_spec.score = 12\n with self.assertRaisesRegex(\n AttributeError, 'Cannot mutate attribute `values` '\n 'outside the custom constructor of ExtensionTypeSpec'):\n mt_spec.values = constant_op.constant([4, 3, 2, 1])\n with self.assertRaisesRegex(\n AttributeError, 'Cannot mutate attribute `values` '\n 'outside the custom constructor of ExtensionTypeSpec'):\n del mt_spec.values\n\n def testSpecFromValue(self):\n mt = MaskedTensorV1([1.0, 2.0, 3.0, 4.0], [True, True, False, True])\n mt_spec = MaskedTensorV1.Spec.from_value(mt)\n\n expected_values_spec = tensor_spec.TensorSpec([4], dtypes.float32)\n expected_mask_spec = tensor_spec.TensorSpec([4], dtypes.bool)\n self.assertEqual(mt_spec.values, expected_values_spec)\n self.assertEqual(mt_spec.mask, expected_mask_spec)\n\n def testSpecSerialize(self):\n\n class Zoo(extension_type.ExtensionType):\n zookeepers: typing.Tuple[str, ...]\n animals: typing.Mapping[str, typing.Mapping[str, ops.Tensor]]\n\n featurespec = {\n 'size': tensor_spec.TensorSpec([3]),\n 'weight': tensor_spec.TensorSpec([])\n }\n zoo_spec = Zoo.Spec(\n zookeepers=['Zoey', 'Zack'],\n animals={\n 'tiger': featurespec,\n 'elephant': featurespec\n })\n\n serialized = 
zoo_spec._serialize()\n self.assertEqual(serialized,\n (('zookeepers', ('Zoey', 'Zack')), ('animals', {\n 'tiger': featurespec,\n 'elephant': featurespec\n })))\n restored = Zoo.Spec._deserialize(serialized)\n self.assertEqual(zoo_spec, restored)\n\n # ImmutableDict is used for the field, but dict for the serialization:\n self.assertIsInstance(zoo_spec.animals, immutable_dict.ImmutableDict)\n serialized_field_name, serialized_field_value = serialized[1]\n self.assertEqual(serialized_field_name, 'animals')\n self.assertIsInstance(serialized_field_value, dict)\n\n def testSpecComponents(self):\n\n class Zoo(extension_type.ExtensionType):\n zookeepers: typing.Tuple[str, ...]\n animals: typing.Mapping[str, typing.Mapping[str, ops.Tensor]]\n\n zoo = Zoo(\n ['Zoey', 'Zack'], {\n 'elephant': {\n 'size': [25, 30, 20],\n 'weight': 2000.0\n },\n 'tiger': {\n 'hunger': 3.2,\n 'size': [3, 8, 2],\n 'weight': 87.3\n }\n })\n zoo_spec = Zoo.Spec.from_value(zoo)\n\n components = zoo_spec._to_components(zoo)\n self.assertLen(components, 5)\n self.assertAllClose(components[0], [25, 30, 20])\n self.assertAllClose(components[1], 2000.0)\n self.assertAllClose(components[2], 3.2)\n self.assertAllClose(components[3], [3, 8, 2])\n self.assertAllClose(components[4], 87.3)\n\n restored = zoo_spec._from_components(components)\n self.assertAllEqual(zoo == restored, True)\n\n self.assertEqual(zoo_spec._component_specs,\n (tensor_spec.TensorSpec([3], dtypes.int32),\n tensor_spec.TensorSpec([], dtypes.float32),\n tensor_spec.TensorSpec([], dtypes.float32),\n tensor_spec.TensorSpec([3], dtypes.int32),\n tensor_spec.TensorSpec([], dtypes.float32)))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass AnonymousExtensionTypeTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n @parameterized.parameters([\n [dict(i=5, f=3.2, b=True, n=None)],\n [dict(x=(1, 2), y={\n 3: 4,\n 5: 6\n })],\n [lambda: dict(t=constant_op.constant(123))],\n [lambda: dict(r=ragged_factory_ops.constant([[1, 2], [3]]))],\n ])\n def testConstruction(self, fields):\n if callable(fields):\n fields = fields()\n extension_type.AnonymousExtensionType(**fields)\n\n @parameterized.parameters([\n [dict(x=[1, 2, 3]), 'unsupported `value` argument'],\n [dict(x=set([1, 2])), 'unsupported `value` argument'],\n [dict(x=(1, dict([(2, [])]))), 'unsupported `value` argument'],\n [\n dict(_tf_extension_type_xyz=5),\n 'Reserved field name .*_tf_extension_type_xyz.*'\n ],\n ])\n def testConstructionErrors(self, fields, error):\n with self.assertRaisesRegex(ValueError, error):\n extension_type.AnonymousExtensionType(**fields)\n\n @parameterized.parameters([\n [dict(i=5, f=3.2, b=True, n=None)],\n [dict(x=(1, 2), y={\n 3: 4,\n 5: 6\n })],\n [lambda: dict(t=constant_op.constant(123))],\n [lambda: dict(r=ragged_factory_ops.constant([[1, 2], [3]]))],\n ])\n def testAttributeAccessors(self, fields):\n if callable(fields):\n fields = fields()\n s = extension_type.AnonymousExtensionType(**fields)\n for (name, value) in fields.items():\n actual = getattr(s, name)\n if isinstance(actual, (ops.Tensor, ragged_tensor.RaggedTensor)):\n self.assertAllEqual(actual, value)\n else:\n self.assertEqual(actual, value)\n\n def testAttributeAccessorsAreImmutable(self):\n s = extension_type.AnonymousExtensionType(x=12, y={'x': 55})\n with self.assertRaisesRegex(AttributeError, 'Cannot set attribute `x`'):\n s.x = 22\n with self.assertRaisesRegex(AttributeError, 'Cannot delete attribute `y`'):\n del s.y\n with self.assertRaisesRegex(TypeError, 'does not support item 
assignment'):\n s.y['x'] = 66\n\n def testReinterpret(self):\n x = MaskedTensorV2([4, 5], [True, False])\n anon_x = extension_type.reinterpret(x,\n extension_type.AnonymousExtensionType)\n self.assertAllEqual(anon_x.values, [4, 5])\n self.assertAllEqual(anon_x.mask, [True, False])\n\n round_trip_x = extension_type.reinterpret(anon_x, MaskedTensorV2)\n self.assertAllEqual(round_trip_x.values, [4, 5])\n self.assertAllEqual(round_trip_x.mask, [True, False])\n\n converted_x = extension_type.reinterpret(anon_x, MaskedTensorV1)\n self.assertAllEqual(converted_x.values, [4, 5])\n self.assertAllEqual(converted_x.mask, [True, False])\n\n # pylint: disable=g-long-lambda\n @parameterized.parameters([\n [\n lambda: extension_type.AnonymousExtensionType(\n values=constant_op.constant([1, 2, 3])), MaskedTensorV2,\n \"Missing required fields: {'mask'}\"\n ],\n [\n lambda: extension_type.AnonymousExtensionType(\n values=(1, 2, 3), mask=None), MaskedTensorV2,\n 'mask: expected a tf.bool Tensor, got None'\n ],\n [\n lambda: extension_type.AnonymousExtensionType(\n values=constant_op.constant([[1, 2], [3, 4]]),\n mask=ragged_factory_ops.constant([[1, 2], [3]])), MaskedTensorV2,\n 'mask: expected a tf.bool Tensor'\n ],\n [\n lambda: extension_type.AnonymousExtensionType(\n values=constant_op.constant([1, 2, 3]),\n mask=constant_op.constant([True, False])), MaskedTensorV2,\n 'Shapes .* are incompatible'\n ],\n [\n lambda: extension_type.AnonymousExtensionType(\n values=constant_op.constant([1, 2, 3])), ops.Tensor,\n 'reinterpret expects `new_type` to be a subclass of '\n 'tf.ExtensionType; '\n 'got .*.Tensor.*'\n ],\n [\n lambda: constant_op.constant([1, 2, 3]),\n extension_type.AnonymousExtensionType,\n 'reinterpret expects `value` to be a tf.ExtensionType instance; '\n 'got.*.Tensor.*'\n ],\n ])\n def testReinterpretErrors(self, value, new_type, error):\n if callable(value):\n value = value()\n with self.assertRaisesRegex((TypeError, ValueError), error):\n extension_type.reinterpret(value, new_type)\n\n def testLoadSavedModelWithUnregisteredExtensionType(self):\n\n def f(x, y):\n x_values = x.values if isinstance(x, MaskedTensorV1) else x\n y_values = y.values if isinstance(y, MaskedTensorV1) else y\n x_mask = x.mask if isinstance(x, MaskedTensorV1) else True\n y_mask = y.mask if isinstance(y, MaskedTensorV1) else True\n return MaskedTensorV1(x_values + y_values, x_mask & y_mask)\n\n t_spec = tensor_spec.TensorSpec(None, dtypes.int32)\n b_spec = tensor_spec.TensorSpec(None, dtypes.bool)\n mt_spec = MaskedTensorV1.Spec(values=t_spec, mask=b_spec)\n model = module.Module()\n model.f = def_function.function(f)\n model.f.get_concrete_function(t_spec, t_spec)\n model.f.get_concrete_function(t_spec, mt_spec)\n model.f.get_concrete_function(mt_spec, t_spec)\n model.f.get_concrete_function(mt_spec, mt_spec)\n\n path = tempfile.mkdtemp(prefix=test.get_temp_dir())\n with temporarily_register_type_spec('tf.test.MaskedTensorV1.Spec',\n MaskedTensorV1.Spec):\n save.save(model, path)\n loaded_model = load.load(path)\n\n with self.assertRaises(ValueError):\n type_spec.lookup('tf.test.MaskedTensorV1')\n\n t = constant_op.constant([10, 20, 30])\n v1 = loaded_model.f(t, t)\n self.assertIsInstance(v1, extension_type.AnonymousExtensionType)\n self.assertAllEqual(v1.values, [20, 40, 60])\n self.assertAllEqual(v1.mask, True)\n\n v2 = loaded_model.f(v1, v1)\n self.assertIsInstance(v2, extension_type.AnonymousExtensionType)\n self.assertAllEqual(v2.values, [40, 80, 120])\n self.assertAllEqual(v2.mask, True)\n\n mt = 
MaskedTensorV1([1, 2, 3], [True, True, False])\n v3 = loaded_model.f(\n t, extension_type.reinterpret(mt,\n extension_type.AnonymousExtensionType))\n self.assertIsInstance(v3, extension_type.AnonymousExtensionType)\n self.assertAllEqual(v3.values, [11, 22, 33])\n self.assertAllEqual(v3.mask, [True, True, False])\n\n v4 = extension_type.reinterpret(v3, MaskedTensorV1)\n self.assertIsInstance(v4, MaskedTensorV1)\n self.assertAllEqual(v4.values, [11, 22, 33])\n self.assertAllEqual(v4.mask, [True, True, False])\n\n\ndef replace_tensors_with_placeholders(value):\n\n def repl(x):\n if isinstance(x, ops.Tensor):\n return array_ops.placeholder_with_default(x, shape=None)\n else:\n return x\n\n return nest.map_structure(repl, value, expand_composites=True)\n\n\[email protected]\ndef temporarily_add_dispatch(op, typ, fn):\n n = len(op._tf_dispatchers)\n dispatch.dispatch_for_types(op, typ)(fn)\n yield\n assert len(op._tf_dispatchers) == n + 1\n del op._tf_dispatchers[-1]\n\n\[email protected]\ndef temporarily_register_type_spec(name, cls):\n \"\"\"Context manager for making temporary changes to the TypeSpec registry.\"\"\"\n type_spec.register(name)(cls)\n yield\n assert type_spec._TYPE_SPEC_TO_NAME.pop(cls) == name\n assert type_spec._NAME_TO_TYPE_SPEC.pop(name) is cls\n\n\nif __name__ == '__main__':\n googletest.main()\n"
] | [
[
"tensorflow.python.framework.type_spec.lookup",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.module.module.Module",
"tensorflow.python.ops.array_ops.placeholder_with_default",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.saved_model.save.save",
"tensorflow.python.ops.control_flow_ops.while_loop_v2",
"tensorflow.python.util.tf_inspect.Parameter",
"tensorflow.python.util.nest.assert_same_structure",
"tensorflow.python.framework.extension_type.unpack",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.framework.extension_type_field.ExtensionTypeField",
"tensorflow.python.framework.type_spec._NAME_TO_TYPE_SPEC.pop",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.util.tf_inspect.signature",
"tensorflow.python.framework.type_spec._TYPE_SPEC_TO_NAME.pop",
"tensorflow.python.framework.extension_type.reinterpret",
"tensorflow.python.util.tf_inspect.Signature",
"tensorflow.python.saved_model.load.load",
"tensorflow.python.framework.extension_type.AnonymousExtensionType",
"tensorflow.python.platform.test.get_temp_dir",
"tensorflow.python.framework.extension_type.ExtensionType",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.framework.extension_type.pack",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.type_spec.register",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.ops.array_ops.where_v2",
"tensorflow.python.ops.math_ops.reduce_max",
"tensorflow.python.util.dispatch.dispatch_for_types",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.framework.extension_type.is_packed",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.math_ops.logical_not",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.util.nest.pack_sequence_as"
]
] |
mavroudisv/acme | [
"3eb4d55a67ea460710ba9e2b2ecf1aa339ba7d2d"
] | [
"acme/agents/tf/d4pg/agent_test.py"
] | [
"# python3\n# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the D4PG agent.\"\"\"\n\nimport sys\nfrom typing import Dict, Sequence\n\nfrom absl.testing import absltest\nimport acme\nfrom acme import specs\nfrom acme import types\nfrom acme.agents.tf import d4pg\nfrom acme.testing import fakes\nfrom acme.tf import networks\nimport numpy as np\nimport sonnet as snt\nimport tensorflow as tf\n\n\ndef make_networks(\n action_spec: types.NestedSpec,\n policy_layer_sizes: Sequence[int] = (10, 10),\n critic_layer_sizes: Sequence[int] = (10, 10),\n vmin: float = -150.,\n vmax: float = 150.,\n num_atoms: int = 51,\n) -> Dict[str, snt.Module]:\n \"\"\"Creates networks used by the agent.\"\"\"\n\n num_dimensions = np.prod(action_spec.shape, dtype=int)\n policy_layer_sizes = list(policy_layer_sizes) + [num_dimensions]\n\n policy_network = snt.Sequential(\n [networks.LayerNormMLP(policy_layer_sizes), tf.tanh])\n critic_network = snt.Sequential([\n networks.CriticMultiplexer(\n critic_network=networks.LayerNormMLP(\n critic_layer_sizes, activate_final=True)),\n networks.DiscreteValuedHead(vmin, vmax, num_atoms)\n ])\n\n return {\n 'policy': policy_network,\n 'critic': critic_network,\n }\n\n\nclass D4PGTest(absltest.TestCase):\n\n def test_d4pg(self):\n # Create a fake environment to test with.\n environment = fakes.ContinuousEnvironment(episode_length=10, bounded=True)\n spec = specs.make_environment_spec(environment)\n\n # Create the networks.\n agent_networks = make_networks(spec.actions)\n\n # Construct the agent.\n agent = d4pg.D4PG(\n environment_spec=spec,\n policy_network=agent_networks['policy'],\n critic_network=agent_networks['critic'],\n batch_size=10,\n samples_per_insert=2,\n min_replay_size=10,\n )\n\n # Try running the environment loop. We have no assertions here because all\n # we care about is that the agent runs without raising any errors.\n loop = acme.EnvironmentLoop(environment, agent)\n loop.run(num_episodes=2)\n # Imports check\n\n\nif __name__ == '__main__':\n absltest.main()\n"
] | [
[
"numpy.prod"
]
] |
acgtcoder/lcdblib | [
"a3e5c81b841f0a06e63641f1cbcc24fc207f40f0"
] | [
"lcdblib/parse/rseqc.py"
] | [
"import pandas as pd\nimport re\nfrom collections import OrderedDict\n\ndef parse_inferExperiment(sample, file):\n \"\"\"Parse rseqc infer expeirment.\n Parameters\n ----------\n sample: str\n Sample name which will be added as row index.\n file: str\n Path to the fastqc zip file.\n\n \"\"\"\n with open(file, 'r') as fh:\n parsed = OrderedDict()\n for l in fh:\n fqs = re.search(r\"^(.+?):\\s+([\\d\\.]+)$\", l)\n if fqs:\n parsed[fqs.group(1)] = float(fqs.group(2))\n\n if len(parsed) == 0:\n return None\n else:\n return pd.DataFrame(parsed, index=[sample])\n\n\ndef parse_geneBodyCoverage(sample, file):\n \"\"\"Parse rseqc genebody coverage.\n\n Parameters\n ----------\n sample: str\n Sample name which will be added as row index.\n file: str\n Path to the fastqc zip file.\n\n \"\"\"\n with open(file, 'r') as fh:\n lines = fh.readlines()\n header = lines[0].strip().split('\\t')[1:]\n values = lines[1].strip().split('\\t')[1:]\n parsed = OrderedDict()\n for k, v in zip(header, values):\n parsed[int(k)] = float(v)\n if len(parsed) == 0:\n return None\n else:\n return pd.DataFrame(parsed, index=[sample])\n\n\ndef parse_bamStat(sample, file):\n \"\"\"Parse rseqc bam stat.\n\n Parameters\n ----------\n sample: str\n Sample name which will be added as row index.\n file: str\n Path to the fastqc zip file.\n\n \"\"\"\n with open(file, 'r') as fh:\n parsed = OrderedDict()\n for l in fh:\n fqs = re.search(r\"^(.+?):\\s*(\\d+)$\", l)\n if fqs:\n parsed[fqs.group(1)] = int(fqs.group(2))\n\n if len(parsed) == 0:\n return None\n else:\n return pd.DataFrame(parsed, index=[sample])\n\n\ndef parse_tin(sample, file):\n \"\"\"Parse rseqc tin.\n\n Parameters\n ----------\n sample: str\n Sample name which will be added as row index.\n file: str\n Path to the fastqc zip file.\n\n \"\"\"\n with open(file, 'r') as fh:\n lines = fh.readlines()\n header = lines[0].strip().split('\\t')[1:]\n values = lines[1].strip().split('\\t')[1:]\n parsed = OrderedDict()\n for k, v in zip(header, values):\n parsed[k] = float(v)\n if len(parsed) == 0:\n return None\n else:\n return pd.DataFrame(parsed, index=[sample])\n"
] | [
[
"pandas.DataFrame"
]
] |
msarahan/ml_smoketest | [
"b7dbebb99b571b4af04bdaa7513817b14c10f63f"
] | [
"net_surgery.py"
] | [
"\n# coding: utf-8\n\nimport os\nimport subprocess\nimport numpy as np\nimport caffe\n\n\ndef main():\n if os.path.isfile('../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'):\n print('CaffeNet found.')\n else:\n print('Downloading pre-trained CaffeNet model...')\n subprocess.call(['python', \"../scripts/download_model_binary.py\",\n \"../models/bvlc_reference_caffenet\"])\n # Load the net, list its data and params, and filter an example image.\n caffe.set_mode_cpu()\n net = caffe.Net('net_surgery/conv.prototxt', caffe.TEST)\n print(\"blobs {}\\nparams {}\".format(net.blobs.keys(), net.params.keys()))\n\n # load image and prepare as a single input batch for Caffe\n im = np.array(caffe.io.load_image('images/cat_gray.jpg', color=False)).squeeze()\n\n im_input = im[np.newaxis, np.newaxis, :, :]\n net.blobs['data'].reshape(*im_input.shape)\n net.blobs['data'].data[...] = im_input\n\n # helper show filter outputs\n def show_filters(net):\n net.forward()\n filt_min, filt_max = net.blobs['conv'].data.min(), net.blobs['conv'].data.max()\n\n # filter the image with initial\n show_filters(net)\n\n # pick first filter output\n conv0 = net.blobs['conv'].data[0, 0]\n print(\"pre-surgery output mean {:.2f}\".format(conv0.mean()))\n # set first filter bias to 1\n net.params['conv'][1].data[0] = 1.\n net.forward()\n print(\"post-surgery output mean {:.2f}\".format(conv0.mean()))\n\n ksize = net.params['conv'][0].data.shape[2:]\n # make Gaussian blur\n sigma = 1.\n y, x = np.mgrid[-ksize[0]//2 + 1:ksize[0]//2 + 1, -ksize[1]//2 + 1:ksize[1]//2 + 1]\n g = np.exp(-((x**2 + y**2)/(2.0*sigma**2)))\n gaussian = (g / g.sum()).astype(np.float32)\n net.params['conv'][0].data[0] = gaussian\n # make Sobel operator for edge detection\n net.params['conv'][0].data[1:] = 0.\n sobel = np.array((-1, -2, -1, 0, 0, 0, 1, 2, 1), dtype=np.float32).reshape((3,3))\n net.params['conv'][0].data[1, 0, 1:-1, 1:-1] = sobel # horizontal\n net.params['conv'][0].data[2, 0, 1:-1, 1:-1] = sobel.T # vertical\n show_filters(net)\n\n # Load the original network and extract the fully connected layers' parameters.\n net = caffe.Net('../models/bvlc_reference_caffenet/deploy.prototxt',\n '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',\n caffe.TEST)\n params = ['fc6', 'fc7', 'fc8']\n # fc_params = {name: (weights, biases)}\n fc_params = {pr: (net.params[pr][0].data, net.params[pr][1].data) for pr in params}\n\n for fc in params:\n print('{} weights are {} dimensional and biases are {} dimensional'.format(fc, fc_params[fc][0].shape, fc_params[fc][1].shape))\n\n # Load the fully convolutional network to transplant the parameters.\n net_full_conv = caffe.Net('net_surgery/bvlc_caffenet_full_conv.prototxt',\n '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',\n caffe.TEST)\n params_full_conv = ['fc6-conv', 'fc7-conv', 'fc8-conv']\n # conv_params = {name: (weights, biases)}\n conv_params = {pr: (net_full_conv.params[pr][0].data, net_full_conv.params[pr][1].data) for pr in params_full_conv}\n\n for conv in params_full_conv:\n print('{} weights are {} dimensional and biases are {} dimensional'.format(conv, conv_params[conv][0].shape, conv_params[conv][1].shape))\n\n for pr, pr_conv in zip(params, params_full_conv):\n conv_params[pr_conv][0].flat = fc_params[pr][0].flat # flat unrolls the arrays\n conv_params[pr_conv][1][...] 
= fc_params[pr][1]\n\n\n net_full_conv.save('net_surgery/bvlc_caffenet_full_conv.caffemodel')\n\n # load input and configure preprocessing\n im = caffe.io.load_image('images/cat.jpg')\n transformer = caffe.io.Transformer({'data': net_full_conv.blobs['data'].data.shape})\n transformer.set_mean('data', np.load('../python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1))\n transformer.set_transpose('data', (2,0,1))\n transformer.set_channel_swap('data', (2,1,0))\n transformer.set_raw_scale('data', 255.0)\n # make classification map by forward and print prediction indices at each location\n out = net_full_conv.forward_all(data=np.asarray([transformer.preprocess('data', im)]))\n print(out['prob'][0].argmax(axis=0))\n # show net input and confidence map (probability of the top prediction at each location)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"numpy.load",
"numpy.exp"
]
] |
asamadiya/onnxruntime | [
"6b3645d97ab222d28bd515f4990af8868194eb52"
] | [
"onnxruntime/test/python/quantization/test_op_concat.py"
] | [
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport unittest\nimport numpy as np\nfrom onnx import helper, TensorProto, numpy_helper, save\nfrom onnxruntime.quantization import quantize_static, QuantFormat\nfrom op_test_utils import InputFeedsNegOneZeroOne, check_model_correctness, check_op_type_count\n\n\nclass TestONNXModel(unittest.TestCase):\n def construct_model(self, model_path):\n # (input)\n # / | \\\n # / | \\\n # / | \\\n # / | \\\n # Conv(1) Conv(2) conv(3)\n # \\ | /\n # \\ | /\n # \\ | /\n # Concat\n # |\n # Identity\n # |\n # (output)\n initializers = []\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 15, 15])\n output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 13, 13, 13])\n\n # Conv1 output [1, 2, 13, 13]\n conv1_weight_initializer = numpy_helper.from_array(\n np.random.randint(-1, 2, [2, 3, 3, 3]).astype(np.float32), name='conv1_weight')\n conv1_node = helper.make_node('Conv', ['input', 'conv1_weight'], ['conv1_output'], name='conv1_node')\n\n # Conv2 output [1, 5, 13, 13]\n conv2_weight_initializer = numpy_helper.from_array(\n np.random.randint(-1, 2, [5, 3, 3, 3]).astype(np.float32), name='conv2_weight')\n conv2_node = helper.make_node('Conv', ['input', 'conv2_weight'], ['conv2_output'], name='conv2_node')\n\n # Conv3 output [1, 6, 13, 13]\n conv3_weight_initializer = numpy_helper.from_array(\n np.random.randint(-1, 2, [6, 3, 3, 3]).astype(np.float32), name='conv3_weight')\n conv3_node = helper.make_node('Conv', ['input', 'conv3_weight'], ['conv3_output'], name='conv3_node')\n\n concat_node = helper.make_node('Concat', ['conv1_output', 'conv2_output', 'conv3_output'], [\n 'concat_output'], name='concat_node', axis=1)\n\n identity_node = helper.make_node('Identity', ['concat_output'], ['output'], name='identity_node')\n\n initializers = [conv1_weight_initializer, conv2_weight_initializer, conv3_weight_initializer]\n graph = helper.make_graph([conv1_node, conv2_node, conv3_node, concat_node, identity_node],\n 'qlinear_concat_op_test', [input], [output], initializer=initializers)\n model = helper.make_model(graph, opset_imports=[helper.make_opsetid(\"\", 13)])\n save(model, model_path)\n\n def test_quantize_concat(self):\n np.random.seed(1)\n\n model_fp32_path = 'concat_fp32.onnx'\n model_uint8_path = 'concat_uint8.onnx'\n model_uint8_qdq_path = 'concat_uint8_qdq.onnx'\n\n self.construct_model(model_fp32_path)\n\n # Verify QOperator mode\n data_reader = InputFeedsNegOneZeroOne(1, {'input': [1, 3, 15, 15]})\n quantize_static(model_fp32_path, model_uint8_path, data_reader)\n\n qnode_counts = {'QLinearConv': 3, 'QuantizeLinear': 1, 'DequantizeLinear': 1, 'QLinearConcat': 1}\n check_op_type_count(self, model_uint8_path, **qnode_counts)\n data_reader.rewind()\n check_model_correctness(self, model_fp32_path, model_uint8_path, data_reader.get_next())\n\n # Verify QDQ mode\n data_reader.rewind()\n quantize_static(model_fp32_path, model_uint8_qdq_path, data_reader, quant_format=QuantFormat.QDQ)\n qdqnode_counts = {'Conv': 3, 'QuantizeLinear': 5, 'DequantizeLinear': 8, 'Concat': 1}\n check_op_type_count(self, model_uint8_qdq_path, **qdqnode_counts)\n data_reader.rewind()\n check_model_correctness(self, model_fp32_path, model_uint8_qdq_path, 
data_reader.get_next())\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.random.randint",
"numpy.random.seed"
]
] |
HarmanDotpy/GeNeVA | [
"26042826d37206cc9ccd9fbeee5bfcae95dda5a6"
] | [
"geneva/models/image_encoder.py"
] | [
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\"\"\"Image encoder using ResBlocks\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom geneva.definitions.res_blocks import ResDownBlock\n\n\nclass ImageEncoder(nn.Module):\n def __init__(self, cfg):\n \"\"\"Encodes Image to 16x16 features maps of depth 256\n , return the 16x16 features as well as the global sum\n pooled features(shape=512)\"\"\"\n super().__init__()\n self.encode_image = cfg.use_fg\n\n if self.encode_image:\n if cfg.img_encoder_type == 'res_blocks':\n self.image_encoder = nn.Sequential(\n # 3 x 128 x 128\n ResDownBlock(3, 64, downsample=True,\n use_spectral_norm=False),\n # 64 x 64 x 64\n nn.BatchNorm2d(64),\n ResDownBlock(64, 128, downsample=True,\n use_spectral_norm=False),\n # 128 x 32 x 32\n nn.BatchNorm2d(128),\n ResDownBlock(128, cfg.image_feat_dim,\n downsample=True,\n use_spectral_norm=False),\n nn.BatchNorm2d(cfg.image_feat_dim),\n # 256 x 16 x 16\n )\n elif cfg.img_encoder_type == 'conv':\n self.image_encoder = nn.Sequential(\n nn.Conv2d(3, 64, 4, 2, 1, bias=False),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Conv2d(64, 128, 4, 2, 1, bias=False),\n nn.ReLU(),\n nn.BatchNorm2d(128),\n nn.Conv2d(128, cfg.image_feat_dim, 4, 2, 1,\n bias=False),\n nn.BatchNorm2d(cfg.image_feat_dim),\n )\n\n self.object_detector = nn.Linear(cfg.image_feat_dim,\n cfg.num_objects)\n\n self.cfg = cfg\n\n def forward(self, img):\n if not self.encode_image:\n return None, None, None\n\n image_features = self.image_encoder(img)\n pooled_features = torch.sum(image_features, dim=(2, 3))\n\n object_detections = F.sigmoid(self.object_detector(pooled_features))\n return image_features, pooled_features, object_detections\n"
] | [
[
"torch.sum",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.Conv2d",
"torch.nn.ReLU"
]
] |
evenlwanvik/TTK-4900-Master | [
"172b444c44f65941ea162c64bc917924fc8e996b"
] | [
"src/training_data.py"
] | [
"from training_data.eddies import eddy_detection,dataframe_eddies,plot_eddies,julianh2gregorian\nfrom tools.machine_learning import sliding_window\nfrom matplotlib.patches import Rectangle\nfrom tools.load_nc import load_netcdf4\nfrom numpy import savez_compressed\nimport matplotlib.pyplot as plt\nfrom tools.bfs import bfs\nfrom datetime import date\nfrom math import cos, pi\nfrom operator import eq\nimport tools.dim as dim\nfrom tools import gui\nimport xarray as xr\nimport numpy as np\nimport itertools\nimport argparse\nimport datetime\nimport logging\nimport random\nimport cv2\nimport os\nimport io\nimport sys\n\nfrom matplotlib.colors import BoundaryNorm\nfrom matplotlib.ticker import MaxNLocator\nfrom keras.models import load_model\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport pdb\n\nargp = argparse.ArgumentParser()\nargp.add_argument(\"-fd\", \"--fDir\", default='C:/Master/data/cmems_data/global_10km/', help=\"CMEMS grid data directory path\")\n#argp.add_argument(\"-fd\", \"--fDir\", default='D:/Master/data/cmems_data/global_10km/', help=\"CMEMS grid data directory path\")\nargp.add_argument(\"-rs\", \"--size\", default=1.3, help=\"rectangular patche size multiplier\")\nargp.add_argument(\"-sd\", \"--savedir\", default='C:/Master/TTK-4900-Master/data/', help=\"training data save dir\")\n#argp.add_argument(\"-sd\", \"--savedir\", default='D:/Master/TTK-4900-Master/data/', help=\"training data save dir\")\nargs = argp.parse_args()\n\nlogPath = f\"{os.path.dirname(os.path.realpath(__file__))}/training_data/log\"\nlogName = f\"{datetime.datetime.now().strftime('%d%m%Y_%H%M')}.log\"\n\nif not os.path.exists(logPath):\n os.makedirs(logPath)\n\n# create logger \nlogger = logging.getLogger(\"Training Data\")\nlogger.setLevel(logging.INFO)\n# create file handler \nfh = logging.FileHandler(\"{0}/{1}\".format(logPath, logName))\nfh.setLevel(logging.INFO)\n# create console handler\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\n# create formatter and add it to the handlers\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\n# add the handlers to the logger\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\ndef lon2km(lon, lat):\n \"\"\" Convert from longitudinal displacement to km \"\"\"\n return lon * 111.320e3 * cos(lat)\n \n\ndef lat2km(lat):\n \"\"\" Convert from latitudinal displacement to km \"\"\"\n return 110.54e3 * lat\n\n\ndef index_list(ncols,nrows):\n \"\"\"\" Create an array of dimension nrows x ncols with indeces as values \"\"\"\n return [[(i,j) for j in range(nrows)] for i in range(ncols)]\n\n\ndef random_grids(arr, nOut):\n \"\"\" Get nOut random grids from arr \"\"\"\n nTot = dim.shape(arr)[0]\n x = random.sample(range(nTot), nOut)\n return [ arr[i] for i in x ]\n\n\ndef create_subgrids(arr, nrows, ncols, inner=1):\n \"\"\"\n Return an array of shape (n, nrows, ncols) where\n n * nrows * ncols = arr.size\n If arr is a 2D array, the returned array should look like n subblocks with\n each subblock preserving the \"physical\" layout of arr.\n 'inner' tells the dimension of the array elements, i.e. 
2 if tuple, 1 if single element\n \"\"\"\n h, w = dim.shape(arr)[0:2]\n arr = np.array(arr)\n assert h % nrows == 0, \"{} rows is not evenly divisible by {}\".format(h, nrows)\n assert w % ncols == 0, \"{} cols is not evenly divisible by {}\".format(w, ncols)\n return (arr.reshape(h//nrows, nrows, -1, ncols, inner)# last 2 is because array consists of 2d idx\n .swapaxes(1,2)\n .reshape(-1, nrows, ncols, inner)) \n\n\ndef plot_grids(data, lon, lat, larger_grid=None, title=\"__\"):\n #\"quickscript\" to plot and investigate images\n\n shape = data[1].shape\n # needs to be larger than 2x2\n if shape[0] < 2 or shape[1] < 2: \n return 'No'\n\n fig, axs = plt.subplots(2, 2, figsize=(12, 8))\n\n # levels for the phase angle to make it not interpolate \n levels = MaxNLocator(nbins=10).tick_values(data[4].min(), data[4].max())\n cmap = plt.get_cmap('CMRmap')\n norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)\n axs[0,0].pcolormesh(lon, lat, data[4].T, cmap=cmap, norm=norm)\n\n axs[0,1].contourf(lon, lat, data[1].T, 20, cmap='rainbow')\n n=-1\n color_array = np.sqrt(((data[2]-n)/2)**2 + ((data[3]-n)/2)**2)\n axs[1,0].quiver(lon, lat, data[2].T, data[3].T, color_array, scale=3) # Plot vector field\n if larger_grid is not None:\n axs[1,1].contourf(larger_grid[0], larger_grid[1], larger_grid[2].T, 20, cmap='rainbow') # show a larger parcel to analyze the surroundings\n #axs[1,2].contourf(lon, lat, data[5].T, 10) # Or plot the OW values\n \n fig.suptitle(title, fontsize=16)\n\n guiEvent, guiValues = gui.show_figure(fig)\n plt.close(fig)\n\n return guiEvent\n\n\ndef eddy_metrics(eddies_ma, centerIdxs, lon, lat):\n \"\"\" \n Finds metrics such as index centroid and diameter of eddy \n\n Parameters:\n ----------\n eddies_ma : masked array\n masked array received from the OW-R2 eddy_detection algorithm\n centerIdxs : tuple\n tuple with (lon,lat) indices of the center coordinates of eddy\n \n returns:\n ----------\n float: diameter of the eddy\n tuple: (lon,lat) index of center\n \"\"\"\n start = centerIdxs[0], centerIdxs[1]\n neighbors = (-1, 0), (0, +1), (+1, 0), (0, -1) # possible neighbors\n similar = eq # The eq method tests the equality of the values.\n\n # Run BFS to find the indexes in eddy from the masked array\n eddyIdxs = np.array( list( bfs(eddies_ma, neighbors, start, similar) ) )\n\n # Find center lon/lat index of eddy\n lonCtrIdx = int( eddyIdxs[:,0].mean() )\n latCtrIdx = int( eddyIdxs[:,1].mean() )\n\n # Find the displacement in lon/lat direction in km, and use the largest as diameter of eddy.\n lonDiameter_km = lon2km( eddyIdxs[:,0].max()-eddyIdxs[:,0].min(), lat[latCtrIdx]) * 0.083 \n latDiameter_km = lat2km( eddyIdxs[:,1].max()-eddyIdxs[:,1].min() ) * 0.083 # 0.083 degrees resolution per index\n\n largest_diameter_km = np.max([lonDiameter_km, latDiameter_km])\n\n return largest_diameter_km, (lonCtrIdx, latCtrIdx)\n\n\ndef check_cyclone(flag):\n # positive 1 denotes positive rotation (counter-clockwise), which is a cyclone in the northern hemisphere\n if flag==1: return \"cyclone\"\n elif flag==-1: return \"anticyclone\"\n else: return \"nothing\"\n\n\ndef save_npz_array(ds, savedir=args.savedir):\n # If folder doesn't exist, create folder and just save the data for the first day\n if not os.path.exists(savedir):\n os.makedirs(savedir)\n savez_compressed( f'{savedir}/sst_train.npz', ds[0])\n savez_compressed( f'{savedir}/ssl_train.npz', ds[1])\n savez_compressed( f'{savedir}/uvel_train.npz', ds[2])\n savez_compressed( f'{savedir}/vvel_train.npz', ds[3])\n savez_compressed( 
f'{savedir}/phase_train.npz', ds[4])\n # If not, we open and append to the existing data\n else:\n with np.load(f'{savedir}/sst_train.npz', 'w+', allow_pickle=True) as data:\n savez_compressed( f'{savedir}/sst_train.npz', np.append(data['arr_0'], ds[0], axis=0))\n with np.load(f'{savedir}/ssl_train.npz', 'w+', allow_pickle=True) as data:\n savez_compressed( f'{savedir}/ssl_train.npz', np.append(data['arr_0'], ds[1], axis=0))\n with np.load(f'{savedir}/uvel_train.npz', 'w+', allow_pickle=True) as data:\n savez_compressed(f'{savedir}/uvel_train.npz', np.append(data['arr_0'], ds[2], axis=0))\n with np.load(f'{savedir}/vvel_train.npz', 'w+', allow_pickle=True) as data:\n savez_compressed(f'{savedir}/vvel_train.npz', np.append(data['arr_0'], ds[3], axis=0))\n with np.load(f'{savedir}/phase_train.npz', 'w+', allow_pickle=True) as data:\n savez_compressed(f'{savedir}/phase_train.npz', np.append(data['arr_0'], ds[4], axis=0))\n\n\ndef semi_automatic_training():\n \"\"\" This application lets you maneuver through windows selected by the OW process.\n In short: OW below the OW_start threshold, which pass the R2_criterion, are considered eddies,\n have a look at https://github.com/JASaa/eddies-R2 for more info about the sample selection process\"\"\"\n\n # Loop through every netcdf file in directory, usually they are spaced by 5 days\n for fName in os.listdir(args.fDir):\n \n if not fName.endswith(\".nc\"):\n continue\n \n logger.info(\"loading netcdf\")\n\n # load data\n (ds,t,lon,lat,depth,uvel_full,vvel_full,sst_full,ssl_full) = load_netcdf4(args.fDir + fName)\n\n # Confidence level, usually 90%\n R2_criterion = 0.90\n\n # OW value at which to begin the evaluation of R2, default was -1, want to use -8 to be absolutely sure\n OW_start = -6.0\n\n # Number of local minima to evaluate using R2 method.\n # Set low (like 20) to see a few R2 eddies quickly.\n # Set high (like 1e5) to find all eddies in domain.\n max_evaluation_points = 100000 \n\n # Minimum number of cells required to be identified as an eddy.\n min_eddie_cells = 3 # set to 3 to be coherent with the use of the R2 method, 3 points seems like a reasonable minimum for a correlation \n\n # z-level to plot. 
Usually set to 0 for the surface.\n k_plot = 0\n\n dlon = abs(lon[0]-lon[1])\n dlat = abs(lat[0]-lat[1])\n\n # Create eddy images for each day in dataset\n #for day, time in enumerate(t):\n # Shuffle the time so that the expert won't see the same long-lasting eddies\n\n for day in random.sample(range(0, len(t)), len(t)): \n\n dateStr = \"{:%d-%m-%Y}\".format(datetime.date(1950, 1, 1) + datetime.timedelta(hours=float(t[day])) )\n logger.info(f\"Creating images for dataset {dateStr}\")\n\n # create a text trap\n text_trap = io.StringIO()\n sys.stdout = text_trap\n\n # Run the OW-R2 algorithm\n lon,lat,u,v,vorticity,OW,OW_eddies,eddie_census,nEddies,circulation_mask = eddy_detection(\n lon,lat,depth,uvel_full,vvel_full,day,R2_criterion,OW_start,max_evaluation_points,min_eddie_cells)\n\n # restore stdout\n sys.stdout = sys.__stdout__\n\n sst_train = []\n ssl_train = []\n uvel_train = []\n vvel_train = []\n phase_train = []\n nDataset = 5\n\n # =========================================================\n # ============== Prepare datasets and lists ===============\n # =========================================================\n\n eddyCtrIdx = []\n for i in range(0,nEddies):\n lonIdx = np.argmax(lon>eddie_census[2,i])-1\n latIdx = np.argmax(lat>eddie_census[3,i])-1\n eddyCtrIdx.append( (lonIdx, latIdx) )\n\n # Netcdf uses (lat,lon) we want to use (lon,lat) and discard the depth\n sst = sst_full[day,:,:].T\n ssl = ssl_full[day,:,:].T\n uvel = uvel_full[day,0,:,:].T\n vvel = vvel_full[day,0,:,:].T\n # Calculate the phase angle (direction) of the current\n with np.errstate(all='ignore'): # Disable zero div warning\n phase = xr.ufuncs.rad2deg( xr.ufuncs.arctan2(vvel, uvel) ) + 180\n OW = OW[:,:,0]\n nLon = len(lon)\n nLat = len(lat)\n\n datasets = (sst, ssl, uvel, vvel, phase, OW) \n \n # =========================================================\n # ======= Create rectangular patches around eddies ========\n # =========================================================\n\n logger.info(f\"+ Creating rectangles for {nEddies} eddies\")\n\n savedImgCounter = 0 # saved image counter for file ID\n for eddyId, ctrIdx in enumerate(eddyCtrIdx): # nEddies\n\n ctrCoord = lon[ctrIdx[0]], lat[ctrIdx[1]]\n diameter_km = eddie_census[5][eddyId]\n\n bfs_diameter_km, bfs_center = eddy_metrics(OW_eddies, ctrIdx, lon, lat)\n\n # Positive rotation (counter-clockwise) is a cyclone in the northern hemisphere because of the coriolis effect\n if (eddie_census[1][eddyId] > 0.0): cyclone = 1 # 1 is a cyclone, 0 is nothing and -1 is anti-cyclone (negative rotation)\n else: cyclone = -1\n\n logger.info(f\"+++ Creating rectangles for {check_cyclone(cyclone)} with center {ctrCoord} and diameter {diameter_km}\")\n \n # Find rectangle metrics\n height = args.size * abs(diameter_km / 110.54) # 1 deg = 110.54 km, 1.2 to be sure the image covers the eddy\n width = args.size * abs(diameter_km / (111.320 * cos(lat[ctrIdx[1]]))) # 1 deg = 111.320*cos(latitude) km, using center latitude as ref\n\n lon_bnds = ctrCoord[0]-width/2.0, ctrCoord[0]+width/2.0\n lat_bnds = ctrCoord[1]-height/2.0, ctrCoord[1]+height/2.0\n \n # Indices of current eddy image\n lonIdxs = np.where((lon >= lon_bnds[0]) & (lon <= lon_bnds[1]))[0]\n latIdxs = np.where((lat >= lat_bnds[0]) & (lat <= lat_bnds[1]))[0]\n\n eddy_data = np.array([np.zeros((lonIdxs.size,latIdxs.size)) for _ in range(6)])\n \n # Plot and flag to save eddy\n #add = plot_grids(eddy_data, lo, la, title)\n\n #-------- Move closer to center of eddy ------------\n\n title = dateStr + \"_\" + 
check_cyclone(cyclone)\n\n choices = ('Center', 'incLon', 'incLat', 'decLon', 'decLat')\n response = 'Center'\n #response = 'Yes' # Skip this section for debugging non-eddy section\n while response in choices:\n\n lo = lon[lonIdxs]\n la = lat[latIdxs]\n\n for i, loIdx in enumerate(lonIdxs):\n for j, laIdx in enumerate(latIdxs):\n for k, measurement in enumerate(datasets): # for every measurement type in datasets\n eddy_data[k,i,j] = measurement[loIdx,laIdx]\n\n # Store a larger grid to make it easier to see if we have an eddy and if we should center image \n if (lonIdxs[0]-5 < 0 or lonIdxs[-1]+5 >= nLon) or (latIdxs[0]-3 < 0 or latIdxs[-1]+3 >= nLat):\n larger_grid = None\n else:\n larger_grid = [ np.zeros(lonIdxs.size+10), np.zeros(latIdxs.size+6), \n np.zeros((lonIdxs.size+10,latIdxs.size+6)), ]\n for i, loIdx in enumerate(range(lonIdxs[0]-5, lonIdxs[-1]+6)):\n for j, laIdx in enumerate(range(latIdxs[0]-3, latIdxs[-1]+4)):\n larger_grid[0][i] = lon[loIdx]\n larger_grid[1][j] = lat[laIdx]\n larger_grid[2][i,j] = ssl[loIdx,laIdx]\n\n response = plot_grids(eddy_data, lo, la, larger_grid, title)\n if response not in choices: # TODO: feel like this is a silly way of doing this\n break\n if response == 'Center':\n # Find the center from water level\n logger.info(f\"+++ Centering eddy towards a minimum/maximum depending on eddy type\")\n if cyclone==1:\n idx = np.unravel_index(eddy_data[1].argmax(), eddy_data[1].shape)\n ctrCoord = lon[lonIdxs[idx[0]]], lat[latIdxs[idx[1]]]\n logger.info(f\"+++ Argmax center -> lon: {ctrCoord[0]}, Center lat: {ctrCoord[1]}\")\n else:\n idx = np.unravel_index(eddy_data[1].argmin(), eddy_data[1].shape)\n ctrCoord = lon[lonIdxs[idx[0]]], lat[latIdxs[idx[1]]]\n logger.info(f\"+++ Argmin center -> lon: {ctrCoord[0]}, Center lat: {ctrCoord[1]}\")\n\n # New width and height in case we've moved in lon/lat direction\n width, height = abs(lo[0]-lo[-1])+dlon, abs(la[0]-la[-1])+dlat\n\n lon_bnds = ctrCoord[0]-width/2.0, ctrCoord[0]+width/2.0\n lat_bnds = ctrCoord[1]-height/2.0, ctrCoord[1]+height/2.0\n\n # Indices of current eddy image\n lonIdxs = np.where((lon >= lon_bnds[0]) & (lon <= lon_bnds[1]))[0]\n latIdxs = np.where((lat >= lat_bnds[0]) & (lat <= lat_bnds[1]))[0]\n\n elif response == 'incLon':\n if (lonIdxs[0] <= 0 or lonIdxs[-1] >= nLon-1): \n logger.info(f\"+++ Longitude can't be increased further\")\n else:\n lonIdxs = np.arange(lonIdxs[0]-1, lonIdxs[-1]+2)\n logger.info(f\"+++ Increasing longitude by 1 cell in both directions to ({lonIdxs[0]}:{lonIdxs[-1]})\")\n elif response == 'incLat':\n if (latIdxs[0] <= 0 or latIdxs[-1] >= nLat-1): \n logger.info(f\"+++ Latitude can't be increased further\")\n else:\n latIdxs = np.arange(latIdxs[0]-1, latIdxs[-1]+2)\n logger.info(f\"+++ Increasing latitude by 1 cell in both directions to ({latIdxs[0]}:{latIdxs[-1]})\")\n elif response == 'decLon':\n lonIdxs = np.arange(lonIdxs[0]+1, lonIdxs[-1])\n logger.info(f\"+++ Decreasing longitude by 1 cell in both directions to ({lonIdxs[0]}:{lonIdxs[-1]})\")\n elif response == 'decLat':\n latIdxs = np.arange(latIdxs[0]+1, latIdxs[-1])\n logger.info(f\"+++ Decreasing latitude by 1 cell in both directions to ({latIdxs[0]}:{latIdxs[-1]})\")\n eddy_data = np.array([np.zeros((lonIdxs.size,latIdxs.size)) for _ in range(6)]) \n\n #----------------------------------------------------------\n \n lo = lon[lonIdxs]\n la = lat[latIdxs]\n\n #guiEvent, guiValues = show_figure(fig)\n #add = 'Yes' # Bypass GUI selection\n if response=='Yes':\n savedImgCounter = savedImgCounter + 1\n # Create 
images?\n                '''\n                dirPath = 'C:/Master/TTK-4900-Master/images/'+dateStr+'/'\n                if not os.path.exists(dirPath):\n                    os.makedirs(dirPath)\n                imPath = dirPath + title + f\"_{savedImgCounter}.png\"   \n                plt.savefig(imPath, bbox_inches='tight')\n                '''\n\n                sst_train.append([eddy_data[0], cyclone]) # [data, label]\n                ssl_train.append([eddy_data[1], cyclone]) \n                uvel_train.append([eddy_data[2], cyclone]) \n                vvel_train.append([eddy_data[3], cyclone]) \n                phase_train.append([eddy_data[4], cyclone]) \n\n                logger.info(f\"+++++ Saving image {eddyId} as an eddy\")   \n\n            else:   \n                logger.info(f\"+++++ Discarding image {eddyId}\")\n        \n        # =========================================================\n        # ================ Select non-eddy images =================\n        # =========================================================\n\n        if savedImgCounter <= 0:\n            logger.info(f\"+++++ No eddies found\")\n            continue    \n\n        # Subgrid (sg) longitude and latitude length\n        sgLon, sgLat = dim.find_avg_dim(sst_train, start_axis=0) \n        logger.info(f\"+++++ Using average dimensions ({sgLon}, {sgLat}) for non-eddy\")\n\n        loRange, laRange = range(0, nLon, sgLon), range(0, nLat, sgLat)\n    \n        # Create OW array of compatible dimensions for comparing masks\n        OW_noeddy = OW[:loRange[-1],:laRange[-1]]\n        OW_noeddy = create_subgrids( np.ma.masked_where(OW_noeddy < -0.8, OW_noeddy), sgLon, sgLat, 1 )\n\n        # Get a 2d grid of indices -> make it moldable to the average grid -> convert to subgrids\n        idx_subgrids = create_subgrids( np.array( index_list(nLon, nLat) )[:loRange[-1],:laRange[-1]], sgLon, sgLat, 2 )\n\n        noneddy_idx_subgrids = []\n        for i, grid in enumerate(OW_noeddy):\n            if not np.ma.is_masked(grid):\n                noneddy_idx_subgrids.append(idx_subgrids[i])\n\n        nNoneddies = len(noneddy_idx_subgrids)\n        data_noeddy = np.array([[np.zeros((sgLon,sgLat)) for _ in range(nNoneddies)] for _ in range(6)])\n        \n        # Shuffle the noneddies and loop through until we have chosen the same amount of non-eddies as eddies\n        random.shuffle(noneddy_idx_subgrids)\n        added = 0\n        for grid_id, idx_grid in enumerate(noneddy_idx_subgrids):\n            OW_ = np.zeros((idx_grid.shape[:2]))\n            for i in range(len(idx_grid)):\n                for j in range(len(idx_grid[0])):\n                    idx = idx_grid[i,j][0], idx_grid[i,j][1]\n                    for k in range(len(data_noeddy)):\n                        data_noeddy[k,grid_id,i,j] = datasets[k][idx]\n            #print(idx_grid)\n            lo, la = lon[idx_grid[:,0,0]], lat[idx_grid[0,:,1]]\n            title = dateStr + \"_noneddy\"\n            add = plot_grids(data_noeddy[:,grid_id,:,:], lo, la, None, title)\n            if add=='Yes':\n                added = added + 1\n                sst_train.append([data_noeddy[0,grid_id,:,:], 0]) # [data, label]\n                ssl_train.append([data_noeddy[1,grid_id,:,:], 0]) \n                uvel_train.append([data_noeddy[2,grid_id,:,:], 0]) \n                vvel_train.append([data_noeddy[3,grid_id,:,:], 0]) \n                phase_train.append([data_noeddy[4,grid_id,:,:], 0])\n                logger.info(f\"+++++ Saving noneddy\")   \n            if added >= savedImgCounter:\n                break\n\n        # =========================================================\n        # ============== Interpolate ==============\n        # =========================================================\n\n        #sst_out = np.array(sst_train)\n        #ssl_out = np.array(ssl_train)\n        #uvel_out = np.array(uvel_train)\n        #vvel_out = np.array(vvel_train)\n        #phase_out = np.array(phase_train)\n        #nTeddies = sst_out.shape[0]\n\n\n        logger.info(f\"Compressing and storing training data so far\")\n\n\n        # =========================================================\n        # ========== Save data as compressed numpy array ==========\n        # =========================================================\n\n        save_npz_array( (sst_train, 
ssl_train, uvel_train, vvel_train, phase_train) )\n\n\ndef adjustment_data():\n    ''' Method to run the ML model to provide correctional non-eddy images for the model '''\n\n    ncpath = 'C:/Master/data/cmems_data/global_10km/2018/phys_noland_2018_001.nc'\n\n    (ds,t,lon,lat,depth,uvel_full,vvel_full,sst_full,ssl_full) = load_netcdf4(ncpath)\n\n    ssl_probLim = 0.95\n    phase_probLim = 0.35\n    stepSize = 8\n    scaler = MinMaxScaler(feature_range=(-1,1))\n\n    clf = load_model('models/new/cnn_mult_full.h5')\n\n    winW, winH = int(14), int(8)\n    dSize = (winW, winH)\n\n    # Lists that will hold the training data\n    sst_train = []\n    ssl_train = []\n    uvel_train = []\n    vvel_train = []\n    phase_train = []\n    nDataset = 5\n\n    # Shuffle the time so that the expert won't see the same long-lasting eddies\n    for i, day in enumerate(random.sample(range(0, len(t)), len(t))): \n\n        ssl = np.array(ssl_full[day].T, dtype='float32') \n        sst = np.array(sst_full[day].T, dtype='float32') \n        uvel = np.array(uvel_full[day,0].T, dtype='float32') \n        vvel = np.array(vvel_full[day,0].T, dtype='float32') \n        with np.errstate(all='ignore'): # Disable zero div warning\n            phase = xr.ufuncs.rad2deg( xr.ufuncs.arctan2(vvel, uvel) ) + 180\n\n        shape = ssl.shape\n        ssl_scaled = scaler.fit_transform(ssl)\n        uvel_scaled = scaler.fit_transform(uvel)\n        vvel_scaled = scaler.fit_transform(vvel)\n        phase_scaled = scaler.fit_transform(phase)\n\n        # loop over the sliding window of indices\n        for x, y, (lonIdxs, latIdxs) in sliding_window(ssl, stepSize=stepSize, windowSize=dSize):\n\n            if lonIdxs[-1] >= shape[0] or latIdxs[-1] >= shape[1]:\n                continue\n            dSize = (winH, winW)\n            # Window indexed data and resizing from a smaller window to model size\n            sst_wind = np.array([[sst[i,j] for j in latIdxs] for i in lonIdxs])\n            ssl_wind = np.array([[ssl[i,j] for j in latIdxs] for i in lonIdxs])\n            ssl_scaled_wind = np.array([[ssl_scaled[i,j] for j in latIdxs] for i in lonIdxs])\n            phase_wind = np.array([[phase[i,j] for j in latIdxs] for i in lonIdxs])\n            phase_scaled_wind = np.array([[phase_scaled[i,j] for j in latIdxs] for i in lonIdxs])\n            uvel_wind = np.array([[uvel[i,j] for j in latIdxs] for i in lonIdxs])\n            uvel_scaled_wind = np.array([[uvel_scaled[i,j] for j in latIdxs] for i in lonIdxs])\n            vvel_wind = np.array([[vvel[i,j] for j in latIdxs] for i in lonIdxs])\n            vvel_scaled_wind = np.array([[vvel_scaled[i,j] for j in latIdxs] for i in lonIdxs])\n\n            #channels = [ssl_scaled_wind, uvel_scaled_wind, vvel_scaled_wind, phase_scaled_wind]\n            channels = [uvel_scaled_wind, vvel_scaled_wind]\n            nChannels = len(channels)\n            X_cnn = np.zeros((winW,winH,nChannels))\n            for lo in range(winW): # Row\n                for la in range(winH): # Column\n                    #X_cnn[i,lo,la,0] = X[0][i][lo][la]\n                    for c in range(nChannels): # Channels\n                        X_cnn[lo,la,c] = channels[c][lo][la]\n\n            X_cnn = np.expand_dims(X_cnn, 0)\n\n            lo, la = lon[lonIdxs], lat[latIdxs]\n\n            # Predict and receive probability\n            prob = clf.predict(X_cnn)\n\n            # By default we say we have a non-eddy (cyclone flag)\n            cyclone_f = 0\n            # If second column is larger than the boundary, we have an anti-cyclone\n            if prob[0,1] > ssl_probLim: \n                print('anti-cyclone | prob: {} | lon: [{}, {}] | lat: [{}, {}]'.format(prob[0,1]*100,lo[0],lo[-1],la[0],la[-1]))\n                cyclone_f = -1\n            # If third column is larger, we have a cyclone\n            elif prob[0,2] > ssl_probLim:\n                print('cyclone | prob: {} | lon: [{}, {}] | lat: [{}, {}]'.format(prob[0,2]*100,lo[0],lo[-1],la[0],la[-1])) \n                cyclone_f = 1\n\n            eddy_data = [sst_wind, ssl_wind, uvel_wind, vvel_wind, phase_wind]\n            \n            # Plot and flag if the 
prediction is correct or not\n            yes_no = plot_grids(eddy_data, lo, la, None, check_cyclone(cyclone_f))\n            # Add to training data if expert labels it correct\n            if yes_no == 'Yes':\n                sst_train.append([sst_wind, cyclone_f]) \n                ssl_train.append([ssl_wind, cyclone_f]) \n                uvel_train.append([uvel_wind, cyclone_f]) \n                vvel_train.append([vvel_wind, cyclone_f]) \n                phase_train.append([phase_wind, cyclone_f])\n            # If not, change the label to non-eddy\n            elif yes_no == 'No':\n                sst_train.append([sst_wind, 0]) \n                ssl_train.append([ssl_wind, 0]) \n                uvel_train.append([uvel_wind, 0]) \n                vvel_train.append([vvel_wind, 0]) \n                phase_train.append([phase_wind, 0])\n        \n        # Every 10 samples add to the compressed array\n        if i%10==0:\n            # ADD TO THE COMPRESSED NUMPY ARRAY\n            savedir = 'C:/Master/TTK-4900-Master/data/adjustment_data/'\n            ds = [sst_train, ssl_train, uvel_train, vvel_train, phase_train]\n            save_npz_array(ds, savedir)\n\nif __name__ == '__main__':\n    #semi_automatic_training()\n    adjustment_data()\n"
] | [
[
"matplotlib.colors.BoundaryNorm",
"numpy.append",
"numpy.ma.masked_where",
"numpy.expand_dims",
"matplotlib.pyplot.get_cmap",
"numpy.where",
"numpy.load",
"numpy.ma.is_masked",
"numpy.zeros",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.subplots",
"numpy.argmax",
"numpy.arange",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.savez_compressed",
"matplotlib.ticker.MaxNLocator",
"numpy.errstate",
"numpy.sqrt"
]
] |
Nivolves2000/hospital-crm | [
"16ed4448e7aa720c102f6fcd56815df4e491aad1"
] | [
"backend/SystemBack1/FindLiverClass.py"
] | [
"import os\r\nimport json\r\nimport numpy as np\r\nimport pandas as pd\r\nimport FeaturesStack as FS\r\n\r\n\r\ndef calculate_gmdh_model(img_f):\r\n if task_type == \"1\":\r\n if sensor_type == \"convex\":\r\n prob = (\r\n -0.946477\r\n + img_f[\"std_vert\"] * np.cbrt(img_f[\"P95(1)_vert\"]) * 0.0171222\r\n + np.power(img_f[\"balx2_hor\"], 3)\r\n * np.sin(img_f[\"dif12_hor\"])\r\n * (-1.583e-05)\r\n + img_f[\"P5_vert\"] * np.cos(img_f[\"pair6664_vert\"]) * (-0.007739)\r\n + np.cbrt(img_f[\"x2_vert\"]) * np.cbrt(img_f[\"balx2_vert\"]) * 0.0831053\r\n + np.cos(img_f[\"pair3947_hor\"]) * np.cos(img_f[\"dif12_vert\"]) * 0.413282\r\n + np.cos(img_f[\"pair4639_hor\"])\r\n * np.cos(img_f[\"pair6967_vert\"])\r\n * (-0.141326)\r\n + np.cbrt(img_f[\"maxfreq_hor\"])\r\n * np.cbrt(img_f[\"mean(1)_vert\"])\r\n * 0.396514\r\n + np.cos(img_f[\"pair4639_hor\"])\r\n * np.arctan(img_f[\"pair5555_vert\"])\r\n * 0.123721\r\n + np.sqrt(img_f[\"pair5045_hor\"])\r\n * np.cos(img_f[\"pair4846_hor\"])\r\n * (-0.110306)\r\n + np.sqrt(img_f[\"maxfreq_orig\"])\r\n * np.power(img_f[\"balx2_hor\"], 3)\r\n * 1.51139e-05\r\n + img_f[\"dif13_vert\"] * np.cbrt(img_f[\"x2_orig\"]) * 0.0276597\r\n )\r\n elif sensor_type == \"linear\":\r\n prob = (\r\n 0.521463\r\n + np.cos(img_f[\"fractal_dim\"])\r\n * np.arctan(img_f[\"pair1526_hor\"])\r\n * (-0.510109)\r\n + np.cbrt(img_f[\"x2_orig\"]) * np.arctan(img_f[\"std(3)_hor\"]) * 0.320271\r\n + np.sin(img_f[\"Q1_vert\"]) * np.cos(img_f[\"skew(2)_vert\"]) * 0.347042\r\n + np.cbrt(img_f[\"median(2)_hor\"]) * np.cos(img_f[\"Q3_vert\"]) * 0.120014\r\n + np.sin(img_f[\"x1_orig\"]) * np.sin(img_f[\"pair5050_vert\"]) * 0.149371\r\n + np.power(img_f[\"kurt(1)_hor\"], 2)\r\n * np.cos(img_f[\"pair2820_hor\"])\r\n * 0.107874\r\n + np.power(img_f[\"pair4845_vert\"], 3)\r\n * np.cos(img_f[\"mean(3)_vert\"])\r\n * 1.95106e-05\r\n + np.cos(img_f[\"mean(3)_vert\"])\r\n * np.arctan(img_f[\"mean(2)_hor\"])\r\n * (-0.115669)\r\n )\r\n\r\n elif sensor_type == \"reinforced_linear\":\r\n prob = (\r\n 0.564665\r\n + np.cbrt(img_f[\"pair2420_hor\"])\r\n * np.arctan(img_f[\"P5(1)_hor\"])\r\n * (-0.185308)\r\n + np.sin(img_f[\"std_hor\"]) * np.sin(img_f[\"pair5359_vert\"]) * 0.529036\r\n + np.cos(img_f[\"range_vert\"])\r\n * np.cos(img_f[\"pair7878_vert\"])\r\n * (-0.326662)\r\n + np.sin(img_f[\"pair6574_vert\"])\r\n * np.cos(img_f[\"Q3(1)_hor\"])\r\n * (-0.337944)\r\n + np.cos(img_f[\"IQR_vert\"])\r\n * np.cos(img_f[\"median(2)_vert\"])\r\n * (-0.237002)\r\n + np.sin(img_f[\"pair5359_vert\"])\r\n * np.cos(img_f[\"median(2)_vert\"])\r\n * (-0.118517)\r\n + np.cos(img_f[\"median(2)_vert\"])\r\n * np.arctan(img_f[\"P5(1)_hor\"])\r\n * 0.138423\r\n + np.cos(img_f[\"pair6574_vert\"])\r\n * np.arctan(img_f[\"pair5649_vert\"])\r\n * 0.051217\r\n + np.sin(img_f[\"pair5359_vert\"])\r\n * np.arctan(img_f[\"x2_vert\"])\r\n * 0.296591\r\n + img_f[\"dif23_vert\"] * np.cos(img_f[\"dif23_vert\"]) * 0.914249\r\n )\r\n else:\r\n prob = 0\r\n elif task_type == \"2\":\r\n prob = 0\r\n else:\r\n prob = 0\r\n return prob, 1 if prob < 0.5 else 2\r\n\r\n\r\ndef forest_prediction(img_f):\r\n if task_type == \"1\":\r\n with open(os.path.join(cur_dir, \"SystemBack/SelfOrganizationForests/\" + sensor_type + \".json\")) as f:\r\n forest = json.load(f)\r\n ypl = [] # y_pred list\r\n for obj in forest:\r\n tree = pd.DataFrame(obj[\"tree\"])\r\n leaf = 1\r\n index = 0\r\n flag = False\r\n y_pred = 0\r\n while not flag:\r\n node = tree.loc[index]\r\n if node[\"side\"] == 1:\r\n if img_f[node[\"feature\"]] < node[\"threshold\"]:\r\n 
y_pred = 1\r\n else:\r\n y_pred = 2\r\n else:\r\n if img_f[node[\"feature\"]] < node[\"threshold\"]:\r\n y_pred = 2\r\n else:\r\n y_pred = 1\r\n try:\r\n index = np.where(\r\n (tree[\"previous_leaf\"] == leaf)\r\n & (tree[\"previous_direction\"] == y_pred)\r\n )[0][0]\r\n leaf = tree.loc[index][\"leaf_number\"]\r\n except:\r\n flag = True\r\n ypl.append(y_pred)\r\n ypl = np.asarray(ypl)\r\n ypl_sum = np.sum(ypl == 1) + np.sum(ypl == 2)\r\n if np.sum(ypl == 1) > np.sum(ypl == 2):\r\n y_pred = 1\r\n forest_prob = (np.sum(ypl == 1) / ypl_sum) * 100\r\n else:\r\n y_pred = 2\r\n forest_prob = (np.sum(ypl == 2) / ypl_sum) * 100\r\n elif task_type == \"2\":\r\n forest_prob = 0\r\n y_pred = 0\r\n else:\r\n forest_prob = 0\r\n y_pred = 0\r\n return forest_prob, y_pred\r\n\r\n\r\ndef get_mean_signs(img_f):\r\n if task_type == \"1\":\r\n if sensor_type == \"convex\":\r\n feature1, feature2, feature3 = (\r\n \"cbrt(P95(1)_vert)\",\r\n \"cos(dif12_vert)\",\r\n \"std_vert\",\r\n )\r\n threshold1, threshold2, threshold3 = (\r\n 5.0132979349645845,\r\n 0.6306169224667781,\r\n 7.127663290343068,\r\n )\r\n value1, value2, value3 = (\r\n np.cbrt(img_f[\"P95(1)_vert\"]),\r\n np.cos(img_f[\"dif12_vert\"]),\r\n img_f[\"std_vert\"],\r\n )\r\n if value1 < threshold1:\r\n res1 = \"Печень в норме\"\r\n else:\r\n res1 = \"Печень не в норме\"\r\n if value2 < threshold2:\r\n res2 = \"Печень не в норме\"\r\n else:\r\n res2 = \"Печень в норме\"\r\n if value3 < threshold3:\r\n res3 = \"Печень в норме\"\r\n else:\r\n res3 = \"Печень не в норме\"\r\n elif sensor_type == \"linear\":\r\n feature1, feature2, feature3 = (\r\n \"cbrt(x2_orig)\",\r\n \"arctan(pair1526_hor)\",\r\n \"cos(fractal_dim)\",\r\n )\r\n threshold1, threshold2, threshold3 = (\r\n 0.6440777961495892,\r\n 1.3522438545232742,\r\n 0.41596845937104104,\r\n )\r\n value1, value2, value3 = (\r\n np.cbrt(img_f[\"x2_orig\"]),\r\n np.arctan(img_f[\"pair1526_hor\"]),\r\n np.cos(img_f[\"fractal_dim\"]),\r\n )\r\n if value1 < threshold1:\r\n res1 = \"Печень в норме\"\r\n else:\r\n res1 = \"Печень не в норме\"\r\n if value2 < threshold2:\r\n res2 = \"Печень не в норме\"\r\n else:\r\n res2 = \"Печень в норме\"\r\n if value3 < threshold3:\r\n res3 = \"Печень не в норме\"\r\n else:\r\n res3 = \"Печень в норме\"\r\n elif sensor_type == \"reinforced_linear\":\r\n feature1, feature2, feature3 = (\r\n \"cos(range_vert)\",\r\n \"cbrt(pair2420_hor)\",\r\n \"sin(pair5359_vert)\",\r\n )\r\n threshold1, threshold2, threshold3 = (\r\n 0.9998433086476912,\r\n 1.6407957194770635,\r\n -0.5549728719823037,\r\n )\r\n value1, value2, value3 = (\r\n np.cos(img_f[\"range_vert\"]),\r\n np.cbrt(img_f[\"pair2420_hor\"]),\r\n np.sin(img_f[\"pair5359_vert\"]),\r\n )\r\n if value1 < threshold1:\r\n res1 = \"Печень в норме\"\r\n else:\r\n res1 = \"Печень не в норме\"\r\n if value2 < threshold2:\r\n res2 = \"Печень не в норме\"\r\n else:\r\n res2 = \"Печень в норме\"\r\n if value3 < threshold3:\r\n res3 = \"Печень не в норме\"\r\n else:\r\n res3 = \"Печень в норме\"\r\n else:\r\n feature1, feature2, feature3 = \"\", \"\", \"\"\r\n threshold1, threshold2, threshold3 = 0, 0, 0\r\n value1, value2, value3 = 0, 0, 0\r\n res1, res2, res3 = 0, 0, 0\r\n elif task_type == \"2\":\r\n feature1, feature2, feature3 = \"\", \"\", \"\"\r\n threshold1, threshold2, threshold3 = 0, 0, 0\r\n value1, value2, value3 = 0, 0, 0\r\n res1, res2, res3 = 0, 0, 0\r\n else:\r\n feature1, feature2, feature3 = \"\", \"\", \"\"\r\n threshold1, threshold2, threshold3 = 0, 0, 0\r\n value1, value2, value3 = 0, 0, 0\r\n res1, res2, 
res3 = 0, 0, 0\r\n return [\r\n {\"feature\": feature1, \"threshold\": threshold1, \"value\": value1, \"result\": res1},\r\n {\"feature\": feature2, \"threshold\": threshold2, \"value\": value2, \"result\": res2},\r\n {\"feature\": feature3, \"threshold\": threshold3, \"value\": value3, \"result\": res3},\r\n ]\r\n\r\n\r\ndef get_all_features():\r\n with open(os.path.join(cur_dir, \"SystemBack/Features/\", filename)) as f:\r\n feature_names = json.load(f)[\"features\"]\r\n with open(os.path.join(cur_dir, \"SystemBack/BestGrad/\", filename)) as f:\r\n best_grad = json.load(f)\r\n with open(os.path.join(cur_dir, \"SystemBack/MaxFeatures/\", filename)) as f:\r\n best_pairs = json.load(f)\r\n\r\n img_f = []\r\n\r\n # fractal dimension of image\r\n img_f.append(FS.mink_val(path))\r\n\r\n # initial matrix\r\n init_matrix = np.concatenate(FS.get_greyscale_matrix(path), axis=None)\r\n img_f.append((np.sum(init_matrix == np.amin(init_matrix)) / init_matrix.size) * 100)\r\n img_f.append((np.sum(init_matrix == np.amax(init_matrix)) / init_matrix.size) * 100)\r\n\r\n # glcm\r\n glcm = FS.get_glcm(init_matrix)\r\n img_f = FS.get_x1x2x3(\r\n glcm, img_f, best_grad[\"initstandard\"], best_grad[\"initbalanced\"]\r\n )\r\n\r\n # horizontal differential matrix\r\n img_f, diff_matrix = FS.get_norm_features(\r\n FS.get_greyscale_matrix(path),\r\n img_f,\r\n \"hor\",\r\n best_grad[\"horstandard\"],\r\n best_grad[\"horbalanced\"],\r\n best_pairs[\"hor\"],\r\n flag=True,\r\n )\r\n\r\n # vertical differential matrix\r\n img_f = FS.get_norm_features(\r\n FS.get_greyscale_matrix(path),\r\n img_f,\r\n \"vert\",\r\n best_grad[\"vertstandard\"],\r\n best_grad[\"vertbalanced\"],\r\n best_pairs[\"vert\"],\r\n )\r\n return pd.DataFrame([img_f], columns=feature_names).iloc[0], diff_matrix\r\n\r\n\r\ndef get_classification_results(parameters):\r\n # task_type: 1 - норма/патология, 2 - стадия фиброза\r\n global sensor_type, path, task_type, cur_dir, filename\r\n sensor_type, path, task_type = (\r\n parameters[\"sensor_type\"],\r\n parameters[\"path\"],\r\n parameters[\"task_type\"],\r\n )\r\n cur_dir, filename = parameters[\"cur_dir\"], parameters[\"filename\"]\r\n\r\n (\r\n img_f,\r\n diff_matrix,\r\n ) = get_all_features() # img_f - image features (признаки изображения)\r\n\r\n # МГУА\r\n gmdh_prob, gmdh_liver_class = calculate_gmdh_model(img_f)\r\n if gmdh_prob > 1 or gmdh_prob < 0:\r\n gmdh_prob = 100\r\n elif gmdh_liver_class == 2:\r\n gmdh_prob = round(gmdh_prob * 100, 1)\r\n elif gmdh_liver_class == 1:\r\n gmdh_prob = round((1 - gmdh_prob) * 100, 1)\r\n gmdh_result = \"Печень в норме\" if gmdh_liver_class == 1 else \"Печень не в норме\"\r\n\r\n # Лес самоорганизации\r\n forest_prob, forest_liver_class = forest_prediction(img_f)\r\n forest_result = \"Печень в норме\" if forest_liver_class == 1 else \"Печень не в норме\"\r\n\r\n # Пороги трёх наилучших признаков\r\n mean_signs = get_mean_signs(img_f)\r\n\r\n return (\r\n {\r\n \"gmdh_result\": gmdh_result,\r\n \"gmdh_probability\": gmdh_prob,\r\n \"forest_result\": forest_result,\r\n \"forest_probability\": forest_prob,\r\n \"mean_signs\": mean_signs,\r\n },\r\n diff_matrix,\r\n )\r\n\r\n"
] | [
[
"numpy.sqrt",
"numpy.sum",
"numpy.cbrt",
"numpy.arctan",
"pandas.DataFrame",
"numpy.asarray",
"numpy.cos",
"numpy.amin",
"numpy.power",
"numpy.amax",
"numpy.sin",
"numpy.where"
]
] |
RayshineRen/Introduction_to_Data_Science_in_Python | [
"b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71"
] | [
"week2/week2.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 14 19:28:11 2020\r\n\r\n@author: Ray\r\n@email: [email protected]\r\n@wechat: RayTing0305\r\n\"\"\"\r\n\r\n###chapter5\r\n\r\nimport pandas as pd\r\nfrom pandas import Series, DataFrame\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nnp.random.seed(12345)\r\nplt.rc('figure', figsize=(10, 6))\r\nPREVIOUS_MAX_ROWS = pd.options.display.max_rows\r\npd.options.display.max_rows = 20\r\nnp.set_printoptions(precision=4, suppress=True)\r\n\r\n\r\n### Series\r\n\r\nobj = pd.Series([4, 7, -5, 3])\r\nobj_array = obj.values\r\nobj_range = obj.index\r\n\r\nobj2 = pd.Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c'])\r\nobj2_array = obj2.values\r\nobj2_range = obj2.index\r\n\r\nobj3 = obj2[['a','c','d']]\r\nobj3_array = obj3.values\r\nobj3_range = obj3.index\r\n\r\nobj4 = obj2[obj2>0]\r\nobj5 = obj2*2\r\nobj6 = np.exp(obj2)\r\n\r\n#print('b' in obj2)\r\n#print('e' in obj2)\r\n\r\n\r\nsdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}\r\nobj7 = pd.Series(sdata)\r\n\r\nstates = ['California', 'Ohio', 'Oregon', 'Texas']\r\nobj8 = pd.Series(sdata, index=states)\r\n\r\n#print(pd.isnull(obj8))\r\n#print(pd.notnull(obj8))\r\n\r\nobj9 = obj7 + obj8\r\n\r\nobj8.name = 'population'\r\nobj8.index.name = 'state'\r\n\r\n\r\n\r\n####DataFrame\r\n\r\ndata = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada', 'Nevada'],\r\n 'year': [2000, 2001, 2002, 2001, 2002, 2003],\r\n 'pop': [1.5, 1.7, 3.6, 2.4, 2.9, 3.2]}\r\nframe = pd.DataFrame(data)\r\nprint(frame.state)\r\n#print(frame.head())\r\n#print(frame.columns)\r\n\r\nframe = pd.DataFrame(data, columns=['year', 'state', 'pop'])\r\n\r\nframe2 = pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'],\r\n index=['one', 'two', 'three', 'four',\r\n 'five', 'six'])\r\nfc1 = frame2['state']\r\nfc2 = frame2.state\r\n#print(fc1==fc2)\r\n#print(id(fc1)==id(fc2))\r\n\r\nfr1 = frame2.loc['two']\r\n#print(fr1)\r\n\r\nframe2['debt'] = np.arange(6.)\r\n#print(frame2)\r\n\r\nval = pd.Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])\r\nframe2['debt'] = val\r\n#print(frame2)\r\n\r\nframe2['eastern'] = frame2.state == 'Ohio'\r\n\r\ndel frame2['eastern']\r\n\r\npop = {'Nevada': {2001: 2.4, 2002: 2.9},\r\n 'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}\r\nframe3 = pd.DataFrame(pop)\r\n\r\n#print(frame3.T)\r\n\r\nframe4 = pd.DataFrame(pop, index=[2001, 2002, 2003])\r\n\r\npdata = {'Ohio': frame3['Ohio'][:-1],\r\n 'Nevada': frame3['Nevada'][:2]}\r\nframe5 = pd.DataFrame(pdata)\r\n\r\nframe3.index.name='year'\r\nframe3.columns.name = 'state'\r\n#print(frame3.values)\r\n\r\n### Index Objects\r\nobj = pd.Series(range(3), index=['a', 'b', 'c'])\r\nindex = obj.index\r\n\r\n##index[1] = 'd' # TypeError\r\n\r\nlabels = pd.Index(np.arange(3))\r\ndup_labels = pd.Index(['foo', 'foo', 'bar', 'bar'])\r\nframe6 = pd.Series(np.arange(4), index = dup_labels)\r\n#print(frame6['foo'])\r\n\r\n\r\n### Essential Functionality\r\n\r\nobj = pd.Series([4.5, 7.2, -5.3, 3.6], index=['d', 'b', 'a', 'c'])\r\nobj2 = obj.reindex(['a', 'b', 'c', 'd', 'e'])\r\nobj3 = pd.Series(['blue', 'purple', 'yellow'], index=[0, 2, 4])\r\nobj4 = obj3.reindex(range(6), method='ffill')\r\n\r\nframe = pd.DataFrame(np.arange(9).reshape((3, 3)),\r\n index=['a', 'c', 'd'],\r\n columns=['Ohio', 'Texas', 'California'])\r\nframe2 = frame.reindex(['a', 'b', 'c', 'd'])\r\n\r\nstates = ['Texas', 'Utah', 'California']\r\nframe3 = frame.reindex(columns=states)\r\n\r\n#fr = frame.loc[['a', 'c'], states]\r\n\r\n\r\n## Dropping Entries from an 
Axis\r\nobj = pd.Series(np.arange(5.), index=['a', 'b', 'c', 'd', 'e'])\r\nnew_obj = obj.drop(['c', 'd'])\r\n\r\n\r\nobj = pd.Series(np.arange(4.), index=['a', 'b', 'c', 'd'])\r\nobj2 = obj[['b', 'a', 'd']]\r\nobj3 = obj[[1, 3]]\r\nobj4 = obj[obj<2]\r\nobj5 = obj['b':'e']\r\nobj['b':'c'] = 5\r\n\r\ndata = pd.DataFrame(np.arange(16).reshape((4, 4)),\r\n index=['Ohio', 'Colorado', 'Utah', 'New York'],\r\n columns=['one', 'two', 'three', 'four'])\r\n#print(data)\r\n#print(data[:2])\r\n#print(data[data['three']>5])\r\n#data[data<5]=0\r\n#print(data)\r\n\r\nloc = data.loc['Colorado', ['two', 'three']]\r\n\r\nloc2 = data.iloc[2, [3, 0, 1]]\r\n#print(loc2)\r\nloc3 = data.iloc[2]\r\nloc4 = data.iloc[[1, 2], [3, 0, 1]]\r\n#print(loc4)\r\nloc5 = data.iloc[:, :3][data.three > 5]\r\n#print(loc5)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"pandas.Series",
"matplotlib.pyplot.rc",
"pandas.DataFrame",
"numpy.set_printoptions",
"numpy.random.seed",
"numpy.exp",
"numpy.arange",
"pandas.Index"
]
] |
joeranbosma/nnDetection | [
"2ebbf1cdc8a8794c73e325f06fea50632c78ae8c"
] | [
"nndet/ptmodule/retinaunet/base.py"
] | [
"\"\"\"\nCopyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport copy\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom functools import partial\nfrom typing import Callable, Hashable, Sequence, Dict, Any, Type\n\nimport torch\nimport numpy as np\nfrom loguru import logger\nfrom torchvision.models.detection.rpn import AnchorGenerator\n\nfrom nndet.utils.tensor import to_numpy\nfrom nndet.evaluator.det import BoxEvaluator\nfrom nndet.evaluator.seg import SegmentationEvaluator\n\nfrom nndet.core.retina import BaseRetinaNet\nfrom nndet.core.boxes.matcher import IoUMatcher\nfrom nndet.core.boxes.sampler import HardNegativeSamplerBatched\nfrom nndet.core.boxes.coder import CoderType, BoxCoderND\nfrom nndet.core.boxes.anchors import get_anchor_generator\nfrom nndet.core.boxes.ops import box_iou\nfrom nndet.core.boxes.anchors import AnchorGeneratorType\n\nfrom nndet.ptmodule.base_module import LightningBaseModuleSWA, LightningBaseModule\n\nfrom nndet.arch.conv import Generator, ConvInstanceRelu, ConvGroupRelu\nfrom nndet.arch.blocks.basic import StackedConvBlock2\nfrom nndet.arch.encoder.abstract import EncoderType\nfrom nndet.arch.encoder.modular import Encoder\nfrom nndet.arch.decoder.base import DecoderType, BaseUFPN, UFPNModular\nfrom nndet.arch.heads.classifier import ClassifierType, CEClassifier\nfrom nndet.arch.heads.regressor import RegressorType, L1Regressor\nfrom nndet.arch.heads.comb import HeadType, DetectionHeadHNM\nfrom nndet.arch.heads.segmenter import SegmenterType, DiCESegmenter\n\nfrom nndet.training.optimizer import get_params_no_wd_on_norm\nfrom nndet.training.learning_rate import LinearWarmupPolyLR\n\nfrom nndet.inference.predictor import Predictor\nfrom nndet.inference.sweeper import BoxSweeper\nfrom nndet.inference.transforms import get_tta_transforms, Inference2D\nfrom nndet.inference.loading import get_loader_fn\nfrom nndet.inference.helper import predict_dir\nfrom nndet.inference.ensembler.segmentation import SegmentationEnsembler\nfrom nndet.inference.ensembler.detection import BoxEnsemblerSelective\n\nfrom nndet.io.transforms import (\n Compose,\n Instances2Boxes,\n Instances2Segmentation,\n FindInstances,\n )\n\n\nclass RetinaUNetModule(LightningBaseModuleSWA):\n base_conv_cls = ConvInstanceRelu\n head_conv_cls = ConvGroupRelu\n block = StackedConvBlock2\n encoder_cls = Encoder\n decoder_cls = UFPNModular\n matcher_cls = IoUMatcher\n head_cls = DetectionHeadHNM\n head_classifier_cls = CEClassifier\n head_regressor_cls = L1Regressor\n head_sampler_cls = HardNegativeSamplerBatched\n segmenter_cls = DiCESegmenter\n\n def __init__(self,\n model_cfg: dict,\n trainer_cfg: dict,\n plan: dict,\n **kwargs\n ):\n \"\"\"\n RetinaUNet Lightning Module Skeleton\n \n Args:\n model_cfg: model configuration. 
Check :method:`from_config_plan`\n                for more information\n            trainer_cfg: trainer information\n            plan: contains parameters which were derived from the planning\n                stage\n        \"\"\"\n        super().__init__(\n            model_cfg=model_cfg,\n            trainer_cfg=trainer_cfg,\n            plan=plan,\n        )\n\n        _classes = [f\"class{c}\" for c in range(plan[\"architecture\"][\"classifier_classes\"])]\n        self.box_evaluator = BoxEvaluator.create(\n            classes=_classes,\n            fast=True,\n            save_dir=None,\n        )\n        self.seg_evaluator = SegmentationEvaluator.create()\n\n        self.pre_trafo = Compose(\n            FindInstances(\n                instance_key=\"target\",\n                save_key=\"present_instances\",\n            ),\n            Instances2Boxes(\n                instance_key=\"target\",\n                map_key=\"instance_mapping\",\n                box_key=\"boxes\",\n                class_key=\"classes\",\n                present_instances=\"present_instances\",\n            ),\n            Instances2Segmentation(\n                instance_key=\"target\",\n                map_key=\"instance_mapping\",\n                present_instances=\"present_instances\",\n            )\n        )\n\n        self.eval_score_key = \"mAP_IoU_0.10_0.50_0.05_MaxDet_100\"\n\n    def training_step(self, batch, batch_idx):\n        \"\"\"\n        Computes a single training step\n        See :class:`BaseRetinaNet` for more information\n        \"\"\"\n        with torch.no_grad():\n            batch = self.pre_trafo(**batch)\n\n        losses, _ = self.model.train_step(\n            images=batch[\"data\"],\n            targets={\n                \"target_boxes\": batch[\"boxes\"],\n                \"target_classes\": batch[\"classes\"],\n                \"target_seg\": batch['target'][:, 0] # Remove channel dimension\n            },\n            evaluation=False,\n            batch_num=batch_idx,\n        )\n        loss = sum(losses.values())\n        return {\"loss\": loss, **{key: l.detach().item() for key, l in losses.items()}}\n\n    def validation_step(self, batch, batch_idx):\n        \"\"\"\n        Computes a single validation step (same as train step but with\n        additional prediction processing)\n        See :class:`BaseRetinaNet` for more information\n        \"\"\"\n        with torch.no_grad():\n            batch = self.pre_trafo(**batch)\n            targets = {\n                \"target_boxes\": batch[\"boxes\"],\n                \"target_classes\": batch[\"classes\"],\n                \"target_seg\": batch['target'][:, 0] # Remove channel dimension\n            }\n            losses, prediction = self.model.train_step(\n                images=batch[\"data\"],\n                targets=targets,\n                evaluation=True,\n                batch_num=batch_idx,\n            )\n            loss = sum(losses.values())\n\n        self.evaluation_step(prediction=prediction, targets=targets)\n        return {\"loss\": loss.detach().item(),\n                **{key: l.detach().item() for key, l in losses.items()}}\n\n    def evaluation_step(\n        self,\n        prediction: dict,\n        targets: dict,\n    ):\n        \"\"\"\n        Perform an evaluation step to add predictions and gt to\n        caching mechanism which is evaluated at the end of the epoch\n\n        Args:\n            prediction: predictions obtained from model\n                'pred_boxes': List[Tensor]: predicted bounding boxes for\n                    each image List[[R, dim * 2]]\n                'pred_scores': List[Tensor]: predicted probability for\n                    the class List[[R]]\n                'pred_labels': List[Tensor]: predicted class List[[R]]\n                'pred_seg': Tensor: predicted segmentation [N, dims]\n            targets: ground truth\n                `target_boxes` (List[Tensor]): ground truth bounding boxes\n                    (x1, y1, x2, y2, (z1, z2))[X, dim * 2], X= number of ground\n                    truth boxes in image\n                `target_classes` (List[Tensor]): ground truth class per box\n                    (classes start from 0) [X], X= number of ground truth\n                    boxes in image\n                `target_seg` (Tensor): segmentation ground truth (if seg was\n                    found in input dict)\n        \"\"\"\n        pred_boxes = to_numpy(prediction[\"pred_boxes\"])\n        pred_classes = to_numpy(prediction[\"pred_labels\"])\n        pred_scores = to_numpy(prediction[\"pred_scores\"])\n\n        gt_boxes = to_numpy(targets[\"target_boxes\"])\n        gt_classes = 
to_numpy(targets[\"target_classes\"])\n gt_ignore = None\n\n self.box_evaluator.run_online_evaluation(\n pred_boxes=pred_boxes,\n pred_classes=pred_classes,\n pred_scores=pred_scores,\n gt_boxes=gt_boxes,\n gt_classes=gt_classes,\n gt_ignore=gt_ignore,\n )\n\n pred_seg = to_numpy(prediction[\"pred_seg\"])\n gt_seg = to_numpy(targets[\"target_seg\"])\n\n self.seg_evaluator.run_online_evaluation(\n seg_probs=pred_seg,\n target=gt_seg,\n )\n\n def training_epoch_end(self, training_step_outputs):\n \"\"\"\n Log train loss to loguru logger\n \"\"\"\n # process and log losses\n vals = defaultdict(list)\n for _val in training_step_outputs:\n for _k, _v in _val.items():\n if _k == \"loss\":\n vals[_k].append(_v.detach().item())\n else:\n vals[_k].append(_v)\n\n for _key, _vals in vals.items():\n mean_val = np.mean(_vals)\n if _key == \"loss\":\n logger.info(f\"Train loss reached: {mean_val:0.5f}\")\n self.log(f\"train_{_key}\", mean_val, sync_dist=True)\n return super().training_epoch_end(training_step_outputs)\n\n def validation_epoch_end(self, validation_step_outputs):\n \"\"\"\n Log val loss to loguru logger\n \"\"\"\n # process and log losses\n vals = defaultdict(list)\n for _val in validation_step_outputs:\n for _k, _v in _val.items():\n vals[_k].append(_v)\n\n for _key, _vals in vals.items():\n mean_val = np.mean(_vals)\n if _key == \"loss\":\n logger.info(f\"Val loss reached: {mean_val:0.5f}\")\n self.log(f\"val_{_key}\", mean_val, sync_dist=True)\n\n # process and log metrics\n self.evaluation_end()\n return super().validation_epoch_end(validation_step_outputs)\n\n def evaluation_end(self):\n \"\"\"\n Uses the cached values from `evaluation_step` to perform the evaluation\n of the epoch\n \"\"\"\n metric_scores, _ = self.box_evaluator.finish_online_evaluation()\n self.box_evaluator.reset()\n\n logger.info(f\"[email protected]:0.5:0.05: {metric_scores['mAP_IoU_0.10_0.50_0.05_MaxDet_100']:0.3f} \"\n f\"[email protected]: {metric_scores['AP_IoU_0.10_MaxDet_100']:0.3f} \"\n f\"[email protected]: {metric_scores['AP_IoU_0.50_MaxDet_100']:0.3f}\")\n\n seg_scores, _ = self.seg_evaluator.finish_online_evaluation()\n self.seg_evaluator.reset()\n metric_scores.update(seg_scores)\n\n logger.info(f\"Proxy FG Dice: {seg_scores['seg_dice']:0.3f}\")\n\n for key, item in metric_scores.items():\n self.log(f'{key}', item, on_step=None, on_epoch=True, prog_bar=False, logger=True)\n\n def configure_optimizers(self):\n \"\"\"\n Configure optimizer and scheduler\n Base configuration is SGD with LinearWarmup and PolyLR learning rate\n schedule\n \"\"\"\n # configure optimizer\n logger.info(f\"Running: initial_lr {self.trainer_cfg['initial_lr']} \"\n f\"weight_decay {self.trainer_cfg['weight_decay']} \"\n f\"SGD with momentum {self.trainer_cfg['sgd_momentum']} and \"\n f\"nesterov {self.trainer_cfg['sgd_nesterov']}\")\n wd_groups = get_params_no_wd_on_norm(self, weight_decay=self.trainer_cfg['weight_decay'])\n optimizer = torch.optim.SGD(\n wd_groups,\n self.trainer_cfg[\"initial_lr\"],\n weight_decay=self.trainer_cfg[\"weight_decay\"],\n momentum=self.trainer_cfg[\"sgd_momentum\"],\n nesterov=self.trainer_cfg[\"sgd_nesterov\"],\n )\n\n # configure lr scheduler\n num_iterations = self.trainer_cfg[\"max_num_epochs\"] * \\\n self.trainer_cfg[\"num_train_batches_per_epoch\"]\n scheduler = LinearWarmupPolyLR(\n optimizer=optimizer,\n warm_iterations=self.trainer_cfg[\"warm_iterations\"],\n warm_lr=self.trainer_cfg[\"warm_lr\"],\n poly_gamma=self.trainer_cfg[\"poly_gamma\"],\n num_iterations=num_iterations\n )\n return 
[optimizer], {'scheduler': scheduler, 'interval': 'step'}\n\n    @classmethod\n    def from_config_plan(cls,\n                         model_cfg: dict,\n                         plan_arch: dict,\n                         plan_anchors: dict,\n                         log_num_anchors: str = None,\n                         **kwargs,\n                         ):\n        \"\"\"\n        Create Configurable RetinaUNet\n\n        Args:\n            model_cfg: model configurations\n                See example configs for more info\n            plan_arch: plan architecture\n                `dim` (int): number of spatial dimensions\n                `in_channels` (int): number of input channels\n                `classifier_classes` (int): number of classes\n                `seg_classes` (int): number of classes\n                `start_channels` (int): number of start channels in encoder\n                `fpn_channels` (int): number of channels to use for FPN\n                `head_channels` (int): number of channels to use for head\n                `decoder_levels` (int): decoder levels to use for detection\n            plan_anchors: parameters for anchors (see\n                :class:`AnchorGenerator` for more info)\n                `stride`: stride\n                `aspect_ratios`: aspect ratios\n                `sizes`: sizes for 2d anchors\n                (`zsizes`: additional z sizes for 3d)\n            log_num_anchors: name of logger to use; if None, no logging\n                will be performed\n            **kwargs:\n        \"\"\"\n        logger.info(f\"Architecture overwrites: {model_cfg['plan_arch_overwrites']} \"\n                    f\"Anchor overwrites: {model_cfg['plan_anchors_overwrites']}\")\n        logger.info(f\"Building architecture according to plan of {plan_arch.get('arch_name', 'not_found')}\")\n        plan_arch.update(model_cfg[\"plan_arch_overwrites\"])\n        plan_anchors.update(model_cfg[\"plan_anchors_overwrites\"])\n        logger.info(f\"Start channels: {plan_arch['start_channels']}; \"\n                    f\"head channels: {plan_arch['head_channels']}; \"\n                    f\"fpn channels: {plan_arch['fpn_channels']}\")\n\n        _plan_anchors = copy.deepcopy(plan_anchors)\n        coder = BoxCoderND(weights=(1.,) * (plan_arch[\"dim\"] * 2))\n        s_param = False if (\"aspect_ratios\" in _plan_anchors) and \\\n                           (_plan_anchors[\"aspect_ratios\"] is not None) else True\n        anchor_generator = get_anchor_generator(\n            plan_arch[\"dim\"], s_param=s_param)(**_plan_anchors)\n\n        encoder = cls._build_encoder(\n            plan_arch=plan_arch,\n            model_cfg=model_cfg,\n        )\n        decoder = cls._build_decoder(\n            encoder=encoder,\n            plan_arch=plan_arch,\n            model_cfg=model_cfg,\n        )\n        matcher = cls.matcher_cls(\n            similarity_fn=box_iou,\n            **model_cfg[\"matcher_kwargs\"],\n        )\n\n        classifier = cls._build_head_classifier(\n            plan_arch=plan_arch,\n            model_cfg=model_cfg,\n            anchor_generator=anchor_generator,\n        )\n        regressor = cls._build_head_regressor(\n            plan_arch=plan_arch,\n            model_cfg=model_cfg,\n            anchor_generator=anchor_generator,\n        )\n        head = cls._build_head(\n            plan_arch=plan_arch,\n            model_cfg=model_cfg,\n            classifier=classifier,\n            regressor=regressor,\n            coder=coder\n        )\n        segmenter = cls._build_segmenter(\n            plan_arch=plan_arch,\n            model_cfg=model_cfg,\n            decoder=decoder,\n        )\n\n        detections_per_img = plan_arch.get(\"detections_per_img\", 100)\n        score_thresh = plan_arch.get(\"score_thresh\", 0)\n        topk_candidates = plan_arch.get(\"topk_candidates\", 10000)\n        remove_small_boxes = plan_arch.get(\"remove_small_boxes\", 0.01)\n        nms_thresh = plan_arch.get(\"nms_thresh\", 0.6)\n\n        logger.info(f\"Model Inference Summary: \\n\"\n                    f\"detections_per_img: {detections_per_img} \\n\"\n                    f\"score_thresh: {score_thresh} \\n\"\n                    f\"topk_candidates: {topk_candidates} \\n\"\n                    f\"remove_small_boxes: {remove_small_boxes} \\n\"\n                    f\"nms_thresh: {nms_thresh}\",\n                    )\n\n        return BaseRetinaNet(\n            dim=plan_arch[\"dim\"],\n            encoder=encoder,\n            decoder=decoder,\n            head=head,\n            anchor_generator=anchor_generator,\n            matcher=matcher,\n            num_classes=plan_arch[\"classifier_classes\"],\n            
decoder_levels=plan_arch[\"decoder_levels\"],\n segmenter=segmenter,\n # model_max_instances_per_batch_element (in mdt per img, per class; here: per img)\n detections_per_img=detections_per_img,\n score_thresh=score_thresh,\n topk_candidates=topk_candidates,\n remove_small_boxes=remove_small_boxes,\n nms_thresh=nms_thresh,\n )\n\n @classmethod\n def _build_encoder(\n cls,\n plan_arch: dict,\n model_cfg: dict,\n ) -> EncoderType:\n \"\"\"\n Build encoder network\n\n Args:\n plan_arch: architecture settings\n model_cfg: additional architecture settings\n\n Returns:\n EncoderType: encoder instance\n \"\"\"\n conv = Generator(cls.base_conv_cls, plan_arch[\"dim\"])\n logger.info(f\"Building:: encoder {cls.encoder_cls.__name__}: {model_cfg['encoder_kwargs']} \")\n encoder = cls.encoder_cls(\n conv=conv,\n conv_kernels=plan_arch[\"conv_kernels\"],\n strides=plan_arch[\"strides\"],\n block_cls=cls.block,\n in_channels=plan_arch[\"in_channels\"],\n start_channels=plan_arch[\"start_channels\"],\n stage_kwargs=None,\n max_channels=plan_arch.get(\"max_channels\", 320),\n **model_cfg['encoder_kwargs'],\n )\n return encoder\n\n @classmethod\n def _build_decoder(\n cls,\n plan_arch: dict,\n model_cfg: dict,\n encoder: EncoderType,\n ) -> DecoderType:\n \"\"\"\n Build decoder network\n\n Args:\n plan_arch: architecture settings\n model_cfg: additional architecture settings\n\n Returns:\n DecoderType: decoder instance\n \"\"\"\n conv = Generator(cls.base_conv_cls, plan_arch[\"dim\"])\n logger.info(f\"Building:: decoder {cls.decoder_cls.__name__}: {model_cfg['decoder_kwargs']}\")\n decoder = cls.decoder_cls(\n conv=conv,\n conv_kernels=plan_arch[\"conv_kernels\"],\n strides=encoder.get_strides(),\n in_channels=encoder.get_channels(),\n decoder_levels=plan_arch[\"decoder_levels\"],\n fixed_out_channels=plan_arch[\"fpn_channels\"],\n **model_cfg['decoder_kwargs'],\n )\n return decoder\n\n @classmethod\n def _build_head_classifier(\n cls,\n plan_arch: dict,\n model_cfg: dict,\n anchor_generator: AnchorGeneratorType,\n ) -> ClassifierType:\n \"\"\"\n Build classification subnetwork for detection head\n\n Args:\n anchor_generator: anchor generator instance\n plan_arch: architecture settings\n model_cfg: additional architecture settings\n\n Returns:\n ClassifierType: classification instance\n \"\"\"\n conv = Generator(cls.head_conv_cls, plan_arch[\"dim\"])\n name = cls.head_classifier_cls.__name__\n kwargs = model_cfg['head_classifier_kwargs']\n\n logger.info(f\"Building:: classifier {name}: {kwargs}\")\n classifier = cls.head_classifier_cls(\n conv=conv,\n in_channels=plan_arch[\"fpn_channels\"],\n internal_channels=plan_arch[\"head_channels\"],\n num_classes=plan_arch[\"classifier_classes\"],\n anchors_per_pos=anchor_generator.num_anchors_per_location()[0],\n num_levels=len(plan_arch[\"decoder_levels\"]),\n **kwargs,\n )\n return classifier\n\n @classmethod\n def _build_head_regressor(\n cls,\n plan_arch: dict,\n model_cfg: dict,\n anchor_generator: AnchorGeneratorType,\n ) -> RegressorType:\n \"\"\"\n Build regression subnetwork for detection head\n\n Args:\n plan_arch: architecture settings\n model_cfg: additional architecture settings\n anchor_generator: anchor generator instance\n\n Returns:\n RegressorType: classification instance\n \"\"\"\n conv = Generator(cls.head_conv_cls, plan_arch[\"dim\"])\n name = cls.head_regressor_cls.__name__\n kwargs = model_cfg['head_regressor_kwargs']\n\n logger.info(f\"Building:: regressor {name}: {kwargs}\")\n regressor = cls.head_regressor_cls(\n conv=conv,\n 
in_channels=plan_arch[\"fpn_channels\"],\n            internal_channels=plan_arch[\"head_channels\"],\n            anchors_per_pos=anchor_generator.num_anchors_per_location()[0],\n            num_levels=len(plan_arch[\"decoder_levels\"]),\n            **kwargs,\n        )\n        return regressor\n\n    @classmethod\n    def _build_head(\n        cls,\n        plan_arch: dict,\n        model_cfg: dict,\n        classifier: ClassifierType,\n        regressor: RegressorType,\n        coder: CoderType,\n    ) -> HeadType:\n        \"\"\"\n        Build detection head\n\n        Args:\n            plan_arch: architecture settings\n            model_cfg: additional architecture settings\n            classifier: classifier instance\n            regressor: regressor instance\n            coder: coder instance to encode boxes\n\n        Returns:\n            HeadType: instantiated head\n        \"\"\"\n        head_name = cls.head_cls.__name__\n        head_kwargs = model_cfg['head_kwargs']\n        sampler_name = cls.head_sampler_cls.__name__\n        sampler_kwargs = model_cfg['head_sampler_kwargs']\n\n        logger.info(f\"Building:: head {head_name}: {head_kwargs} \"\n                    f\"sampler {sampler_name}: {sampler_kwargs}\")\n        sampler = cls.head_sampler_cls(**sampler_kwargs)\n        head = cls.head_cls(\n            classifier=classifier,\n            regressor=regressor,\n            coder=coder,\n            sampler=sampler,\n            log_num_anchors=None,\n            **head_kwargs,\n        )\n        return head\n\n    @classmethod\n    def _build_segmenter(\n        cls,\n        plan_arch: dict,\n        model_cfg: dict,\n        decoder: DecoderType,\n    ) -> SegmenterType:\n        \"\"\"\n        Build segmenter head\n\n        Args:\n            plan_arch: architecture settings\n            model_cfg: additional architecture settings\n            decoder: decoder instance\n\n        Returns:\n            SegmenterType: segmenter head\n        \"\"\"\n        if cls.segmenter_cls is not None:\n            name = cls.segmenter_cls.__name__\n            kwargs = model_cfg['segmenter_kwargs']\n            conv = Generator(cls.base_conv_cls, plan_arch[\"dim\"])\n\n            logger.info(f\"Building:: segmenter {name} {kwargs}\")\n            segmenter = cls.segmenter_cls(\n                conv,\n                seg_classes=plan_arch[\"seg_classes\"],\n                in_channels=decoder.get_channels(),\n                decoder_levels=plan_arch[\"decoder_levels\"],\n                **kwargs,\n            )\n        else:\n            segmenter = None\n        return segmenter\n\n    @staticmethod\n    def get_ensembler_cls(key: Hashable, dim: int) -> Callable:\n        \"\"\"\n        Get ensembler classes to combine multiple predictions\n        Needs to be overwritten in subclasses!\n        \"\"\"\n        _lookup = {\n            2: {\n                \"boxes\": None,\n                \"seg\": None,\n            },\n            3: {\n                \"boxes\": BoxEnsemblerSelective,\n                \"seg\": SegmentationEnsembler,\n            }\n        }\n        if dim == 2:\n            raise NotImplementedError\n        return _lookup[dim][key]\n\n    @classmethod\n    def get_predictor(cls,\n                      plan: Dict,\n                      models: Sequence[RetinaUNetModule],\n                      num_tta_transforms: int = None,\n                      do_seg: bool = False,\n                      **kwargs,\n                      ) -> Predictor:\n        # process plan\n        crop_size = plan[\"patch_size\"]\n        batch_size = plan[\"batch_size\"]\n        inference_plan = plan.get(\"inference_plan\", {})\n        logger.info(f\"Found inference plan: {inference_plan} for prediction\")\n        if num_tta_transforms is None:\n            num_tta_transforms = 8 if plan[\"network_dim\"] == 3 else 4\n\n        # setup\n        tta_transforms, tta_inverse_transforms = \\\n            get_tta_transforms(num_tta_transforms, True)\n        logger.info(f\"Using {len(tta_transforms)} tta transformations for prediction (one dummy trafo).\")\n\n        ensembler = {\"boxes\": partial(\n            cls.get_ensembler_cls(key=\"boxes\", dim=plan[\"network_dim\"]).from_case,\n            parameters=inference_plan,\n        )}\n        if do_seg:\n            ensembler[\"seg\"] = partial(\n                cls.get_ensembler_cls(key=\"seg\", dim=plan[\"network_dim\"]).from_case,\n            )\n\n        predictor = Predictor(\n            ensembler=ensembler,\n            models=models,\n            crop_size=crop_size,\n            tta_transforms=tta_transforms,\n            tta_inverse_transforms=tta_inverse_transforms,\n            
batch_size=batch_size,\n            **kwargs,\n        )\n        if plan[\"network_dim\"] == 2:\n            raise NotImplementedError\n            predictor.pre_transform = Inference2D([\"data\"])\n        return predictor\n\n    def sweep(self,\n              cfg: dict,\n              save_dir: os.PathLike,\n              train_data_dir: os.PathLike,\n              case_ids: Sequence[str],\n              run_prediction: bool = True,\n              **kwargs,\n              ) -> Dict[str, Any]:\n        \"\"\"\n        Sweep detection parameters to find the best predictions\n\n        Args:\n            cfg: config used for training\n            save_dir: save dir used for training\n            train_data_dir: directory where preprocessed training/validation\n                data is located\n            case_ids: case identifiers to prepare and predict\n            run_prediction: predict cases\n            **kwargs: keyword arguments passed to predict function\n\n        Returns:\n            Dict: inference plan\n                e.g. (exact params depend on ensembler class used for prediction)\n                `iou_thresh` (float): best IoU threshold\n                `score_thresh (float)`: best score threshold\n                `no_overlap` (bool): enable/disable class independent NMS (ciNMS)\n        \"\"\"\n        logger.info(f\"Running parameter sweep on {case_ids}\")\n\n        train_data_dir = Path(train_data_dir)\n        preprocessed_dir = train_data_dir.parent\n        processed_eval_labels = preprocessed_dir / \"labelsTr\"\n\n        _save_dir = save_dir / \"sweep\"\n        _save_dir.mkdir(parents=True, exist_ok=True)\n\n        prediction_dir = save_dir / \"sweep_predictions\"\n        prediction_dir.mkdir(parents=True, exist_ok=True)\n\n        if run_prediction:\n            logger.info(\"Predict cases with default settings...\")\n            predictor = predict_dir(\n                source_dir=train_data_dir,\n                target_dir=prediction_dir,\n                cfg=cfg,\n                plan=self.plan,\n                source_models=save_dir,\n                num_models=1,\n                num_tta_transforms=None,\n                case_ids=case_ids,\n                save_state=True,\n                model_fn=get_loader_fn(mode=self.trainer_cfg.get(\"sweep_ckpt\", \"last\")),\n                **kwargs,\n            )\n\n        logger.info(\"Start parameter sweep...\")\n        ensembler_cls = self.get_ensembler_cls(key=\"boxes\", dim=self.plan[\"network_dim\"])\n        sweeper = BoxSweeper(\n            classes=[item for _, item in cfg[\"data\"][\"labels\"].items()],\n            pred_dir=prediction_dir,\n            gt_dir=processed_eval_labels,\n            target_metric=self.eval_score_key,\n            ensembler_cls=ensembler_cls,\n            save_dir=_save_dir,\n        )\n        inference_plan = sweeper.run_postprocessing_sweep()\n        return inference_plan\n"
] | [
[
"torch.no_grad",
"torch.optim.SGD",
"numpy.mean"
]
] |
isabella232/nnabla | [
"82a3c6fed382f889d1a4a429c696bb8cedf6ce79"
] | [
"python/test/function/test_affine.py"
] | [
"# Copyright 2017,2018,2019,2020,2021 Sony Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport numpy as np\nimport nnabla.functions as F\nfrom nbla_test_utils import list_context\n\nctxs = list_context('Affine')\n\n\ndef ref_affine(x, w, b, base_axis):\n shape = list(x.shape[:base_axis])\n shape += [-1]\n out_shape = w.shape[1:]\n y = np.dot(x.reshape(*shape), w.reshape(w.shape[0], -1))\n if b is not None:\n y += b.reshape((1,) * (len(shape) - 1) + (-1,))\n return y.reshape(tuple(shape[:-1]) + tuple(out_shape))\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"seed\", [313])\[email protected](\"base_axis, weight_shape\",\n [(1, (12, 2, 3)), (2, (4, 4)), (-1, (4, 4)), (-2, (12, 3, 4))])\[email protected](\"bias\", [True, False])\ndef test_affine_forward_backward(seed, base_axis, weight_shape, bias,\n ctx, func_name):\n\n from nbla_test_utils import function_tester\n rng = np.random.RandomState(seed)\n # Input\n inputs = [rng.randn(2, 3, 4).astype(np.float32)]\n # Weight\n inputs += [rng.randn(*weight_shape).astype(np.float32)]\n # Bias\n if bias:\n inputs += [rng.randn(*weight_shape[1:]).astype(np.float32)]\n else:\n inputs += [None]\n function_tester(rng, F.affine, ref_affine, inputs, func_args=[base_axis],\n atol_b=1e-2, dstep=1e-3, ctx=ctx, func_name=func_name)\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"seed\", [313])\[email protected](\"base_axis, weight_shape\",\n [(1, (12, 3, 4)), (2, (4, 4)), (-1, (4, 4)), (-2, (12, 3, 4))])\[email protected](\"bias\", [True, False])\ndef test_affine_double_backward(seed, base_axis, weight_shape, bias,\n ctx, func_name):\n\n from nbla_test_utils import backward_function_tester, grad_function_forward_function_output\n from nnabla.backward_function.affine import AffineDataGrad, AffineFilterGrad\n rng = np.random.RandomState(seed)\n # Input\n inputs = [rng.randn(2, 3, 4).astype(np.float32)]\n # Weight\n inputs += [rng.randn(*weight_shape).astype(np.float32)]\n # Bias\n if bias:\n inputs += [rng.randn(*weight_shape[1:]).astype(np.float32) * 1e2]\n else:\n inputs += [None]\n func_args = [base_axis]\n # Affine\n backward_function_tester(rng, F.affine, inputs, func_args=func_args,\n dstep=1e-3, ctx=ctx)\n # DataGrad\n df, y = grad_function_forward_function_output(AffineDataGrad,\n F.affine, ctx, inputs, *func_args)\n df.xshape = inputs[0].shape\n ginputs = [rng.randn(*y.shape), inputs[1]]\n backward_function_tester(rng, df, ginputs, func_args=[],\n atol_accum=2e-2, dstep=1e-3, ctx=ctx, non_accum_check=True)\n\n # FilterGrad\n df, y = grad_function_forward_function_output(AffineFilterGrad,\n F.affine, ctx, inputs, *func_args)\n df.wshape = inputs[1].shape\n ginputs = [rng.randn(*y.shape), inputs[0]]\n backward_function_tester(rng, df, ginputs, func_args=[],\n dstep=1e-3, ctx=ctx, non_accum_check=True)\n"
] | [
[
"numpy.random.RandomState"
]
] |
cTatu/fracdiff | [
"0ee3967b98ab2e5d67dc72cc21a2543f4dc5b113"
] | [
"tests/test_fracdiffstat.py"
] | [
"import numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose\n\nfrom fracdiff import Fracdiff\nfrom fracdiff import FracdiffStat\nfrom fracdiff.stat import StatTester\n\n\nclass TestFracdiffStat:\n \"\"\"\n Test `FracdiffStat`.\n \"\"\"\n\n @staticmethod\n def _is_stat(x):\n return StatTester().is_stat(x)\n\n @pytest.mark.parametrize(\"window\", [10])\n @pytest.mark.parametrize(\"mode\", [\"full\", \"valid\"])\n @pytest.mark.parametrize(\"precision\", [0.01])\n @pytest.mark.parametrize(\"n_jobs\", [None, -1])\n def test_order(self, window, mode, precision, n_jobs):\n np.random.seed(42)\n X = np.random.randn(1000, 10).cumsum(0)\n\n fs = FracdiffStat(mode=mode, window=window, precision=precision, n_jobs=n_jobs)\n fs.fit(X)\n\n X_st = fs.transform(X)\n X_ns = np.empty_like(X_st[:, :0])\n\n for i in range(X.shape[1]):\n f = Fracdiff(fs.d_[i] - precision, mode=mode, window=window)\n X_ns = np.concatenate((X_ns, f.fit_transform(X[:, [i]])), 1)\n\n for i in range(X.shape[1]):\n assert self._is_stat(X_st[:, i])\n assert not self._is_stat(X_ns[:, i])\n\n @pytest.mark.parametrize(\"window\", [10])\n def test_lower_is_stat(self, window):\n \"\"\"\n Test if `StationarityFracdiff.fit` returns `lower`\n if `lower`th differenciation is already stationary.\n \"\"\"\n np.random.seed(42)\n X = np.random.randn(100, 1)\n\n f = FracdiffStat(window=window, lower=0.0).fit(X)\n\n assert f.d_[0] == 0.0\n\n @pytest.mark.parametrize(\"window\", [10])\n def test_upper_is_not_stat(self, window):\n \"\"\"\n Test if `StationarityFracdiff.fit` returns `np.nan`\n if `upper`th differenciation is still non-stationary.\n \"\"\"\n np.random.seed(42)\n X = np.random.randn(100, 1).cumsum(0)\n\n f = FracdiffStat(window=window, upper=0.0, lower=-1.0).fit(X)\n\n assert np.isnan(f.d_[0])\n\n @pytest.mark.parametrize(\"window\", [10])\n @pytest.mark.parametrize(\"mode\", [\"full\", \"valid\"])\n @pytest.mark.parametrize(\"precision\", [0.01])\n @pytest.mark.parametrize(\"n_jobs\", [None, -1])\n def test_transform(self, window, mode, precision, n_jobs):\n \"\"\"\n Test if `FracdiffStat.transform` works\n for array with n_features > 1.\n \"\"\"\n np.random.seed(42)\n X = np.random.randn(100, 10).cumsum(0)\n\n fs = FracdiffStat(\n window=window, mode=mode, precision=precision, n_jobs=n_jobs\n ).fit(X)\n out = fs.transform(X)\n\n exp = np.empty_like(out[:, :0])\n for i in range(X.shape[1]):\n f = Fracdiff(fs.d_[i], mode=mode, window=window)\n exp = np.concatenate((exp, f.fit_transform(X[:, [i]])), 1)\n\n assert_allclose(out, exp)\n"
] | [
[
"numpy.random.seed",
"numpy.random.randn",
"numpy.empty_like",
"numpy.testing.assert_allclose",
"numpy.isnan"
]
] |
function2-llx/MONAI | [
"4cddaa830b61b88ec78e089bb5f21e05bb1a78f4"
] | [
"tests/test_hilbert_transform.py"
] | [
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.networks.layers import HilbertTransform\nfrom monai.utils import OptionalImportError\nfrom tests.utils import SkipIfModule, SkipIfNoModule, skip_if_no_cuda\n\n\ndef create_expected_numpy_output(input_datum, **kwargs):\n\n x = np.fft.fft(input_datum.cpu().numpy() if input_datum.device.type == \"cuda\" else input_datum.numpy(), **kwargs)\n f = np.fft.fftfreq(x.shape[kwargs[\"axis\"]])\n u = np.heaviside(f, 0.5)\n new_dims_before = kwargs[\"axis\"]\n new_dims_after = len(x.shape) - kwargs[\"axis\"] - 1\n for _ in range(new_dims_before):\n u = np.expand_dims(u, 0)\n for _ in range(new_dims_after):\n u = np.expand_dims(u, -1)\n ht = np.fft.ifft(x * 2 * u, axis=kwargs[\"axis\"])\n\n return ht\n\n\ncpu = torch.device(\"cpu\")\nn_samples = 500\nhann_windowed_sine = np.sin(2 * np.pi * 10 * np.linspace(0, 1, n_samples)) * np.hanning(n_samples)\n\n# CPU TEST DATA\n\ncpu_input_data = {}\ncpu_input_data[\"1D\"] = torch.as_tensor(hann_windowed_sine, device=cpu).unsqueeze(0).unsqueeze(0)\ncpu_input_data[\"2D\"] = (\n torch.as_tensor(np.stack([hann_windowed_sine] * 10, axis=1), device=cpu).unsqueeze(0).unsqueeze(0)\n)\ncpu_input_data[\"3D\"] = (\n torch.as_tensor(np.stack([np.stack([hann_windowed_sine] * 10, axis=1)] * 10, axis=2), device=cpu)\n .unsqueeze(0)\n .unsqueeze(0)\n)\ncpu_input_data[\"1D 2CH\"] = torch.as_tensor(np.stack([hann_windowed_sine] * 10, axis=1), device=cpu).unsqueeze(0)\ncpu_input_data[\"2D 2CH\"] = torch.as_tensor(\n np.stack([np.stack([hann_windowed_sine] * 10, axis=1)] * 10, axis=2), device=cpu\n).unsqueeze(0)\n\n# SINGLE-CHANNEL CPU VALUE TESTS\n\nTEST_CASE_1D_SINE_CPU = [\n {}, # args (empty, so use default)\n cpu_input_data[\"1D\"], # Input data: Random 1D signal\n create_expected_numpy_output(cpu_input_data[\"1D\"], axis=2), # Expected output: FFT of signal\n 1e-5, # absolute tolerance\n]\n\nTEST_CASE_2D_SINE_CPU = [\n {}, # args (empty, so use default)\n cpu_input_data[\"2D\"], # Input data: Random 1D signal\n create_expected_numpy_output(cpu_input_data[\"2D\"], axis=2), # Expected output: FFT of signal\n 1e-5, # absolute tolerance\n]\n\nTEST_CASE_3D_SINE_CPU = [\n {}, # args (empty, so use default)\n cpu_input_data[\"3D\"], # Input data: Random 1D signal\n create_expected_numpy_output(cpu_input_data[\"3D\"], axis=2),\n 1e-5, # absolute tolerance\n]\n\n# MULTICHANNEL CPU VALUE TESTS, PROCESS ALONG FIRST SPATIAL AXIS\n\nTEST_CASE_1D_2CH_SINE_CPU = [\n {}, # args (empty, so use default)\n cpu_input_data[\"1D 2CH\"], # Input data: Random 1D signal\n create_expected_numpy_output(cpu_input_data[\"1D 2CH\"], axis=2),\n 1e-5, # absolute tolerance\n]\n\nTEST_CASE_2D_2CH_SINE_CPU = [\n {}, # args (empty, so use default)\n cpu_input_data[\"2D 2CH\"], # Input data: Random 1D signal\n create_expected_numpy_output(cpu_input_data[\"2D 2CH\"], axis=2),\n 1e-5, # absolute tolerance\n]\n\n# GPU TEST DATA\n\nif 
torch.cuda.is_available():\n gpu = torch.device(\"cuda\")\n\n gpu_input_data = {}\n gpu_input_data[\"1D\"] = torch.as_tensor(hann_windowed_sine, device=gpu).unsqueeze(0).unsqueeze(0)\n gpu_input_data[\"2D\"] = (\n torch.as_tensor(np.stack([hann_windowed_sine] * 10, axis=1), device=gpu).unsqueeze(0).unsqueeze(0)\n )\n gpu_input_data[\"3D\"] = (\n torch.as_tensor(np.stack([np.stack([hann_windowed_sine] * 10, axis=1)] * 10, axis=2), device=gpu)\n .unsqueeze(0)\n .unsqueeze(0)\n )\n gpu_input_data[\"1D 2CH\"] = torch.as_tensor(np.stack([hann_windowed_sine] * 10, axis=1), device=gpu).unsqueeze(0)\n gpu_input_data[\"2D 2CH\"] = torch.as_tensor(\n np.stack([np.stack([hann_windowed_sine] * 10, axis=1)] * 10, axis=2), device=gpu\n ).unsqueeze(0)\n\n # SINGLE CHANNEL GPU VALUE TESTS\n\n TEST_CASE_1D_SINE_GPU = [\n {}, # args (empty, so use default)\n gpu_input_data[\"1D\"], # Input data: Random 1D signal\n create_expected_numpy_output(gpu_input_data[\"1D\"], axis=2), # Expected output: FFT of signal\n 1e-5, # absolute tolerance\n ]\n\n TEST_CASE_2D_SINE_GPU = [\n {}, # args (empty, so use default)\n gpu_input_data[\"2D\"], # Input data: Random 1D signal\n create_expected_numpy_output(gpu_input_data[\"2D\"], axis=2), # Expected output: FFT of signal\n 1e-5, # absolute tolerance\n ]\n\n TEST_CASE_3D_SINE_GPU = [\n {}, # args (empty, so use default)\n gpu_input_data[\"3D\"], # Input data: Random 1D signal\n create_expected_numpy_output(gpu_input_data[\"3D\"], axis=2), # Expected output: FFT of signal\n 1e-5, # absolute tolerance\n ]\n\n # MULTICHANNEL GPU VALUE TESTS, PROCESS ALONG FIRST SPATIAL AXIS\n\n TEST_CASE_1D_2CH_SINE_GPU = [\n {}, # args (empty, so use default)\n gpu_input_data[\"1D 2CH\"], # Input data: Random 1D signal\n create_expected_numpy_output(gpu_input_data[\"1D 2CH\"], axis=2),\n 1e-5, # absolute tolerance\n ]\n\n TEST_CASE_2D_2CH_SINE_GPU = [\n {}, # args (empty, so use default)\n gpu_input_data[\"2D 2CH\"], # Input data: Random 1D signal\n create_expected_numpy_output(gpu_input_data[\"2D 2CH\"], axis=2),\n 1e-5, # absolute tolerance\n ]\n\n# TESTS CHECKING PADDING, AXIS SELECTION ETC ARE COVERED BY test_detect_envelope.py\n\n\n@SkipIfNoModule(\"torch.fft\")\nclass TestHilbertTransformCPU(unittest.TestCase):\n @parameterized.expand(\n [\n TEST_CASE_1D_SINE_CPU,\n TEST_CASE_2D_SINE_CPU,\n TEST_CASE_3D_SINE_CPU,\n TEST_CASE_1D_2CH_SINE_CPU,\n TEST_CASE_2D_2CH_SINE_CPU,\n ]\n )\n def test_value(self, arguments, image, expected_data, atol):\n result = HilbertTransform(**arguments)(image)\n result = result.squeeze(0).squeeze(0).numpy()\n np.testing.assert_allclose(result, expected_data.squeeze(), atol=atol)\n\n\n@skip_if_no_cuda\n@SkipIfNoModule(\"torch.fft\")\nclass TestHilbertTransformGPU(unittest.TestCase):\n @parameterized.expand(\n []\n if not torch.cuda.is_available()\n else [\n TEST_CASE_1D_SINE_GPU,\n TEST_CASE_2D_SINE_GPU,\n TEST_CASE_3D_SINE_GPU,\n TEST_CASE_1D_2CH_SINE_GPU,\n TEST_CASE_2D_2CH_SINE_GPU,\n ],\n skip_on_empty=True,\n )\n def test_value(self, arguments, image, expected_data, atol):\n result = HilbertTransform(**arguments)(image)\n result = result.squeeze(0).squeeze(0).cpu().numpy()\n np.testing.assert_allclose(result, expected_data.squeeze(), atol=atol)\n\n\n@SkipIfModule(\"torch.fft\")\nclass TestHilbertTransformNoFFTMod(unittest.TestCase):\n def test_no_fft_module_error(self):\n self.assertRaises(OptionalImportError, HilbertTransform(), torch.randn(1, 1, 10))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.heaviside",
"torch.randn",
"torch.as_tensor",
"numpy.fft.ifft",
"torch.cuda.is_available",
"numpy.expand_dims",
"numpy.stack",
"numpy.linspace",
"torch.device",
"numpy.fft.fftfreq",
"numpy.hanning"
]
] |
HoliestCow/ece692_deeplearning | [
"638c27e0d9c01ec9b0a8be8a85e54937645a897e"
] | [
"project1/multiprocess.py"
] | [
"import os\nimport sys\nimport multiprocessing\nimport copyreg\nimport types\nimport time\nfrom six import string_types\nfrom progressbar import ProgressBar, FormatLabel, Percentage, Bar, ETA\nimport numpy as np\nimport pandas as pd\nfrom subprocess import Popen\nfrom collections import OrderedDict\nfrom basic_class import BasicClass\n\n\nMAX_NCORES = multiprocessing.cpu_count()\nSAFE_NCORES = MAX_NCORES - 2\n\n\n# -----------------------------------------------------------------------------\n# This is a trick to allow multiprocessing to use target functions that are\n# object methods. This is used for the algorithms which are trained and then\n# evaluations are completed inside of MP threads\n# -----------------------------------------------------------------------------\ndef _pickle_method(m):\n if m.im_self is None:\n return getattr, (m.im_class, m.im_func.func_name)\n else:\n return getattr, (m.im_self, m.im_func.func_name)\n\n\ncopyreg.pickle(types.MethodType, _pickle_method)\n\n\ndef _mute_stdout():\n sys.stdout = open(os.devnull, 'w')\n\n\nclass _MultiProcessor(BasicClass):\n\n def __init__(self, ncores=SAFE_NCORES):\n self.ncores = ncores\n\n @property\n def ncores(self):\n return self._ncores\n\n @ncores.setter\n def ncores(self, x):\n if x is None:\n ncores = self._ncores\n elif isinstance(x, string_types):\n if x.lower() == 'max':\n ncores = MAX_NCORES\n elif x.lower() == 'safe':\n ncores = SAFE_NCORES\n elif x.isdigit():\n ncores = int(x)\n else:\n raise ValueError('Unrecognized `ncores`: {}'.format(x))\n else:\n ncores = int(x)\n if ncores <= 0:\n raise ValueError('`ncores` must be positive: {}'.format(ncores))\n if ncores > MAX_NCORES:\n raise ValueError(\n 'ncores={} exceeds MAX_NCORES={}'.format(ncores, MAX_NCORES))\n self._ncores = ncores\n self._print('Using {} cores'.format(self.ncores))\n\n\nclass PyMultiProcessor(_MultiProcessor):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n copyreg.pickle(types.MethodType, _pickle_method)\n self.reset_cmds()\n\n def reset_cmds(self):\n self.funcs = []\n self.args = []\n self.num_cmds = 0\n\n def add_func_and_args(self, func, args):\n \"\"\"\n Add one function and dict of args to be processed\n \"\"\"\n assert isinstance(args, dict), '`args` not dict: {}'.format(args)\n self.funcs.append(func)\n self.args.append(args)\n self.num_cmds += 1\n\n def run_processes(self, ncores=None, concat=False,\n mute_stdout=True):\n \"\"\"\n Run all functions and args added via `add_func_and_args`\n If `ncores=1` multiprocessing is not used.\n If `concat` the results will be attempted to be concatenated along\n axis=1 (column-wise)\n \"\"\"\n self.ncores = ncores\n # ProgressBar stuff\n widgets = [\n FormatLabel('Processed: %(value)d of {} '.format(self.num_cmds)),\n Percentage(),\n Bar(),\n ETA()]\n # Storage\n results = []\n # Single process\n if self.ncores == 1:\n pbar = ProgressBar(widgets=widgets, maxval=self.num_cmds).start()\n for i, (f, a) in enumerate(zip(self.funcs, self.args)):\n results.append(f(**a))\n pbar.update(i + 1)\n pbar.finish()\n # Multiprocess\n else:\n pbar = ProgressBar(widgets=widgets, maxval=self.num_cmds).start()\n if mute_stdout:\n self.pool = multiprocessing.Pool(processes=self.ncores,\n initializer=_mute_stdout)\n else:\n self.pool = multiprocessing.Pool(processes=self.ncores)\n procs = []\n # Start procs\n for i, (f, a) in enumerate(zip(self.funcs, self.args)):\n procs.append(self.pool.apply_async(f, (), a))\n # Wait for and collect results\n for i, p in enumerate(procs):\n results.append(p.get())\n 
pbar.update(i + 1)\n pbar.finish()\n self.pool.close()\n if concat:\n # Concat dataframes?\n if all([isinstance(x, pd.DataFrame) for x in results]):\n self.results = pd.concat(results, axis=0)\n self.results.sort_index(inplace=True)\n # Concat dicts of arrays?\n elif all([isinstance(x, dict) for x in results]):\n self.results = OrderedDict()\n # Commented this to make it work.\n # for result in results:\n # for k in result:\n # if not isinstance(result[k], np.ndarray):\n # result[k] = np.array(result[k])\n # append_to_dict_list(self.results, k, result[k])\n for k in self.results:\n self.results[k] = np.concatenate(results[k], axis=0)\n else:\n self.results = results\n else:\n self.results = results\n # Cleanup\n self.reset_cmds()\n return self.results\n\n\ndef run_process(cmd=[], log_fname=None, **kwargs):\n \"\"\"\n cmd_dict is a dictionary of the command line command and arguments,\n log filename and the process number (ID)\n \"\"\"\n # Starting time\n start_time = time.time()\n # Discard stdout to /dev/null\n if (log_fname == 'null') or (log_fname is False):\n proc = Popen(cmd, stdout=open(os.devnull, 'w'))\n # Print stdout normally\n elif log_fname is None:\n proc = Popen(cmd)\n # Save stdout to file\n else:\n proc = Popen(cmd, stdout=open(log_fname, 'w'))\n # Wait for command to finish...\n proc.wait()\n end_time = time.time()\n return {'pid': proc.pid,\n 'cmd': ' '.join(cmd),\n 'start_time': start_time,\n 'end_time': end_time,\n 'duration_sec': end_time - start_time}\n"
] | [
[
"numpy.concatenate",
"pandas.concat"
]
] |
Minys233/GCN-BMP | [
"21b64a3c8cc9bc33718ae09c65aa917e575132eb"
] | [
"train_binary.py"
] | [
"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 12/8/2018 6:54 PM\r\n# @Author : chinshin\r\n# @FileName: train_ddi.py\r\n\r\nfrom __future__ import print_function\r\nfrom __future__ import unicode_literals\r\n\r\nimport os\r\nimport sys\r\nimport random\r\nimport chainer\r\nimport logging\r\nimport numpy as np\r\nimport matplotlib\r\nmatplotlib.use('AGG')\r\nfrom chainer.backends import cuda\r\nfrom chainer import functions as F\r\nfrom chainer import links\r\nfrom chainer import optimizers\r\nfrom chainer import training\r\nfrom chainer.iterators import SerialIterator\r\nfrom chainer.training import extensions as E\r\nfrom chainer.training import triggers\r\nfrom chainer_chemistry.links import GraphLinear\r\nfrom argparse import ArgumentParser\r\nfrom os.path import dirname, abspath\r\nROOT_PATH = dirname(dirname(dirname(abspath(__file__))))\r\nsys.path.insert(0, ROOT_PATH)\r\nfrom parsers import CSVFileParserForPair\r\nfrom chainer_chemistry.dataset.preprocessors import preprocess_method_dict\r\nfrom chainer_chemistry.dataset.converters import concat_mols\r\nfrom chainer_chemistry.datasets import NumpyTupleDataset\r\nfrom chainer_chemistry.training.extensions import ROCAUCEvaluator, PRCAUCEvaluator, PrecisionEvaluator, RecallEvaluator, F1Evaluator, AccuracyEvaluator\r\nfrom chainer_chemistry.models import MLP, SchNet, WeaveNet, RSGCN, Regressor, Classifier, Cosine\r\n# 稳定版本\r\n# from models.chin_ggnn import GGNN\r\n# from models.ggnn_dev import GGNN\r\nfrom models.ggnn_att import GGNN\r\nfrom models.models import NFP\r\n# 引入共注意力机制\r\nfrom models.coattention.alternating_coattention import AlternatingCoattention\r\nfrom models.coattention.parallel_coattention import ParallelCoattention, CircularParallelCoattention\r\nfrom models.coattention.vqa_parallel_coattention import VQAParallelCoattention\r\nfrom models.coattention.PoolingFineCoattention import PoolingFineCoattention\r\nfrom models.coattention.lt_fine_coattention import LinearTransformFineCoattention\r\nfrom models.coattention.nie_coattention import NieFineCoattention, DeepNieFineCoattention, FourierFineCoattention, VeryDeepNieFineCoattention, \\\r\n ExtremeDeepNieFineCoattention\r\nfrom models.coattention.bimpm import BiMPM\r\n# from models.ggnn_dev_self_loop import GGNN\r\nfrom chainer_chemistry.models import NTN, SymMLP, HolE, DistMult\r\n\r\nlogging.basicConfig(format='%(asctime)s: %(filename)s: %(funcName)s: %(lineno)d: %(message)s', level=logging.INFO)\r\nglobal_seed = 2018\r\nrandom.seed(global_seed)\r\n\r\n\r\nclass GraphConvPredictorForPair(chainer.Chain):\r\n def __init__(self, graph_conv, attn=None, mlp=None, symmetric=None):\r\n \"\"\"Initializes the graph convolution predictor.\r\n\r\n Args:\r\n graph_conv: The graph convolution network required to obtain\r\n molecule feature representation.\r\n mlp: Multi layer perceptron; used as the final fully connected\r\n layer. 
Set it to `None` if no operation is necessary\r\n after the `graph_conv` calculation.\r\n \"\"\"\r\n\r\n super(GraphConvPredictorForPair, self).__init__()\r\n with self.init_scope():\r\n self.graph_conv = graph_conv\r\n if isinstance(mlp, chainer.Link):\r\n self.mlp = mlp\r\n if isinstance(attn, chainer.Link):\r\n self.attn = attn\r\n if not isinstance(mlp, chainer.Link):\r\n self.mlp = mlp\r\n if not isinstance(attn, chainer.Link):\r\n self.attn = attn\r\n self.symmetric = symmetric\r\n\r\n def __call__(self, atoms_1, adjs_1, atoms_2, adjs_2):\r\n if self.xp == cuda.cupy:\r\n atoms_1 = cuda.to_gpu(atoms_1)\r\n adjs_1 = cuda.to_gpu(adjs_1)\r\n atoms_2 = cuda.to_gpu(atoms_2)\r\n adjs_2 = cuda.to_gpu(adjs_2)\r\n\r\n g1 = self.graph_conv(atoms_1, adjs_1)\r\n atoms_1 = self.graph_conv.get_atom_array()\r\n g2 = self.graph_conv(atoms_2, adjs_2)\r\n atoms_2 = self.graph_conv.get_atom_array()\r\n\r\n g1, g2 = self.attn(atoms_1, g1, atoms_2, g2)\r\n\r\n if self.mlp.__class__.__name__ == 'MLP':\r\n g = F.concat((g1, g2), axis=-1)\r\n g = self.mlp(g)\r\n return g\r\n elif self.mlp.__class__.__name__ == 'NTN':\r\n g = self.mlp(g1, g2)\r\n return g\r\n elif self.mlp.__class__.__name__ == 'SymMLP':\r\n g = self.mlp(g1, g2)\r\n return g\r\n elif self.mlp.__class__.__name__ == 'HolE':\r\n g = self.mlp(g1, g2)\r\n return g\r\n elif self.mlp.__class__.__name__ == 'DistMult':\r\n g = self.mlp(g1, g2)\r\n return g\r\n elif self.mlp.__class__.__name__ == 'Cosine':\r\n g = self.mlp(g1, g2)\r\n return g\r\n else:\r\n ValueError('[ERROR] No methods for similarity prediction')\r\n\r\n def predict(self, atoms_1, adjs_1, atoms_2, adjs_2):\r\n if self.symmetric is None:\r\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\r\n x = self.__call__(atoms_1, adjs_1, atoms_2, adjs_2)\r\n target = F.sigmoid(x)\r\n if self.xp == cuda.cupy:\r\n target = cuda.to_gpu(target)\r\n return target\r\n elif self.symmetric == 'or' or self.symmetric == 'and':\r\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\r\n x1 = self.__call__(atoms_1, adjs_1, atoms_2, adjs_2)\r\n target1 = F.sigmoid(x1)\r\n x2 = self.__call__(atoms_2, adjs_2, atoms_1, adjs_1)\r\n target2 = F.sigmoid(x2)\r\n if self.xp == cuda.cupy:\r\n target1 = cuda.to_gpu(target1)\r\n target2 = cuda.to_gpu(target2)\r\n if self.symmetric == 'or':\r\n target = self.xp.max([target1, target2])\r\n elif self.symmetric == 'and':\r\n target = self.xp.min([target1, target2])\r\n return target\r\n\r\n\r\ndef set_up_predictor(method,\r\n fp_hidden_dim, fp_out_dim, conv_layers, concat_hidden,\r\n fp_dropout_rate, fp_batch_normalization,\r\n net_hidden_dims, class_num,\r\n weight_typing=True, sim_method='mlp', symmetric=None,\r\n attn_model=None\r\n ):\r\n\r\n sim_method_dict = {\r\n 'mlp': 'multi-layered perceptron',\r\n 'ntn': 'bilinear transform',\r\n 'symmlp': 'symmetric perceptron',\r\n 'hole': 'holographic embedding',\r\n 'dist-mult': 'dist-mult',\r\n }\r\n\r\n method_dict = {\r\n 'ggnn': 'GGNN',\r\n 'nfp': 'NFP',\r\n }\r\n\r\n logging.info('Graph Embedding: {}'.format(method_dict.get(method, None)))\r\n logging.info('Link Prediction: {}'.format(sim_method_dict.get(sim_method, None)))\r\n\r\n lp = None\r\n if sim_method == 'mlp':\r\n lp = MLP(out_dim=class_num, hidden_dims=net_hidden_dims)\r\n\r\n elif sim_method == 'ntn':\r\n ntn_out_dim = 8\r\n lp = NTN(left_dim=fp_out_dim, right_dim=fp_out_dim, out_dim=class_num,\r\n ntn_out_dim=ntn_out_dim, hidden_dims=net_hidden_dims)\r\n\r\n elif sim_method == 'symmlp':\r\n lp = 
MLP(out_dim=class_num, hidden_dims=net_hidden_dims)\r\n\r\n elif sim_method == 'hole':\r\n lp = HolE(out_dim=class_num, hidden_dims=net_hidden_dims)\r\n\r\n elif sim_method == 'dist-mult':\r\n dm_out_dim = 8\r\n lp = DistMult(left_dim=fp_out_dim, right_dim=fp_out_dim, out_dim=class_num,\r\n dm_out_dim=dm_out_dim, hidden_dims=net_hidden_dims)\r\n else:\r\n raise ValueError('[ERROR] Invalid link prediction model: {}'.format(method))\r\n\r\n attn = None\r\n scorer = 'bilinear'\r\n if attn_model == 'alter':\r\n attn_weight_tying = True\r\n logging.info('Using alternating co-attention')\r\n if attn_weight_tying:\r\n logging.info('Weight is tying')\r\n attn = AlternatingCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim, head=8, weight_tying=True)\r\n elif attn_model == 'para':\r\n attn_weight_tying = True\r\n logging.info('Using parallel co-attention')\r\n logging.info('Scorer is {}'.format(scorer))\r\n if attn_weight_tying:\r\n logging.info('Weight is tying')\r\n attn = ParallelCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim, head=1,\r\n activation=F.tanh, weight_tying=attn_weight_tying)\r\n elif attn_model == 'circ':\r\n logging.info('Using circular based parallel co-attention')\r\n attn = CircularParallelCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim,\r\n activation=F.tanh)\r\n\r\n elif attn_model == 'vqa':\r\n logging.info('Using vqa fine-grained co-attention')\r\n attn = VQAParallelCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim, head=8)\r\n\r\n elif attn_model == 'pool':\r\n logging.info('Using pool fine-graind co-attention')\r\n attn = PoolingFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim)\r\n\r\n elif attn_model == 'lt':\r\n logging.info('Using lt fine-grained co-attention')\r\n attn = LinearTransformFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim)\r\n\r\n elif attn_model == 'nie':\r\n logging.info('Using nie fine-grained co-attention')\r\n logging.info('Using activation function tanh')\r\n attn = NieFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim,\r\n head=8, activation=F.tanh)\r\n\r\n elif attn_model == 'deep':\r\n logging.info('Using deep fine-grained co-attention')\r\n logging.info('Using activation function tanh')\r\n attn = DeepNieFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim,\r\n head=8, activation=F.tanh)\r\n\r\n elif attn_model == 'very-deep':\r\n logging.info('Using very deep fine-grained co-attention')\r\n logging.info('Using activation function tanh')\r\n attn = VeryDeepNieFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim,\r\n head=8, activation=F.tanh)\r\n\r\n elif attn_model == 'extreme-deep':\r\n logging.info('Using very deep fine-grained co-attention')\r\n logging.info('Using activation function tanh')\r\n attn = ExtremeDeepNieFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim,\r\n head=8, activation=F.tanh)\r\n\r\n elif attn_model == 'fourier':\r\n logging.info('Using fourier fine-grained co-attention')\r\n logging.info('Using activation function tanh')\r\n attn = FourierFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim,\r\n head=8, activation=F.tanh)\r\n\r\n elif attn_model == 'bimpm':\r\n logging.info('Using bimpm matching strategy')\r\n attn = BiMPM(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim, head=fp_out_dim,\r\n with_max_pool=True, with_att_mean=True, with_att_max=True, aggr=F.sum)\r\n\r\n else:\r\n raise ValueError('[ERROR] Invalid Co-Attention Method.')\r\n\r\n encoder = None\r\n if method == 'ggnn':\r\n if not weight_typing:\r\n 
logging.info('Weight is not tying')\r\n if fp_dropout_rate != 0.0:\r\n logging.info('Forward propagation dropout rate is {:.1f}'.format(fp_dropout_rate))\r\n if fp_batch_normalization:\r\n logging.info('Using batch normalization')\r\n if concat_hidden:\r\n logging.info('Using concatenation between layers')\r\n\r\n encoder = GGNN(out_dim=fp_out_dim, hidden_dim=fp_hidden_dim, n_layers=conv_layers,\r\n concat_hidden=concat_hidden, weight_tying=weight_typing)\r\n elif method == 'nfp':\r\n print('Training an NFP predictor...')\r\n encoder = NFP(out_dim=fp_out_dim, hidden_dim=fp_hidden_dim, n_layers=conv_layers, concat_hidden=concat_hidden)\r\n\r\n else:\r\n raise ValueError('[ERROR] Invalid graph embedding encoder.')\r\n\r\n predictor = GraphConvPredictorForPair(encoder, attn, lp, symmetric=symmetric)\r\n return predictor\r\n\r\n\r\ndef augment_dataset(dataset):\r\n dataset_tuple = dataset.get_datasets()\r\n atoms1, adjs1, atoms2, adjs2, labels = dataset_tuple\r\n new_atoms1 = np.concatenate((atoms1, atoms2), axis=0)\r\n new_atoms2 = np.concatenate((atoms2, atoms1), axis=0)\r\n new_adjs1 = np.concatenate((adjs1, adjs2), axis=0)\r\n new_adjs2 = np.concatenate((adjs2, adjs1), axis=0)\r\n new_labels = np.concatenate((labels, labels), axis=0)\r\n new_dataset = NumpyTupleDataset(new_atoms1, new_adjs1, new_atoms2, new_adjs2, new_labels)\r\n return new_dataset\r\n\r\n\r\ndef parse_arguments():\r\n # Lists of supported preprocessing methods/models.\r\n method_list = ['nfp', 'ggnn', 'schnet', 'weavenet', 'rsgcn', 'ecfp']\r\n sim_method_list = ['mlp', 'cosine', 'ntn', 'symmlp', 'hole', 'dist-mult']\r\n layer_aggregator_list = ['gru-attn', 'gru', 'lstm-attn', 'lstm', 'attn', 'self-attn', 'concat', 'max-pool']\r\n attn_list = ['para', 'alter', 'circ', 'vqa', 'pool', 'lt', 'nie', 'bimpm', 'deep', 'fourier', 'very-deep', 'extreme-deep']\r\n\r\n # Set up the argument parser.\r\n parser = ArgumentParser(description='Classification on ddi dataset')\r\n parser.add_argument('--datafile', '-d', type=str,\r\n default='ddi_train.csv',\r\n help='csv file containing the dataset')\r\n parser.add_argument('--train-datafile', type=str,\r\n default='ddi_train.csv',\r\n help='csv file containing the train dataset')\r\n parser.add_argument('--train-pos-neg-ratio', type=float,\r\n default=-1.,\r\n help='ratio between positive and negative instances')\r\n parser.add_argument('--valid-datafile', type=str,\r\n default='ddi_test.csv',\r\n help='csv file containing the test dataset')\r\n parser.add_argument('--method', '-m', type=str, choices=method_list,\r\n help='method name', default='nfp')\r\n parser.add_argument('--sim-method', type=str, choices=sim_method_list,\r\n help='similarity method', default='mlp')\r\n parser.add_argument('--label', '-l', nargs='+',\r\n default=['label', ],\r\n help='target label for classification')\r\n parser.add_argument('--class-names', type=str,\r\n default=['interaction', 'no interactions'],\r\n help='class names in classification task')\r\n parser.add_argument('--conv-layers', '-c', type=int, default=4,\r\n help='number of convolution layers')\r\n parser.add_argument('--batchsize', '-b', type=int, default=32,\r\n help='batch size')\r\n parser.add_argument('--gpu', '-g', type=int, default=-1,\r\n help='id of gpu to use; negative value means running'\r\n 'the code on cpu')\r\n parser.add_argument('--out', '-o', type=str, default='result',\r\n help='path to save the computed models to')\r\n parser.add_argument('--epoch', '-e', type=int, default=10,\r\n help='number of epochs')\r\n 
parser.add_argument('--learning-rate', type=float, default=0.001,\r\n help='learning rate of optimizer')\r\n parser.add_argument('--weight-decay-rate', type=float, default=0.,\r\n help='weight decay rate of optimizer')\r\n parser.add_argument('--exp-shift-rate', type=float, default=1.,\r\n help='exponential shift rate')\r\n parser.add_argument('--exp-shift-strategy', type=int, default=1,\r\n help='strategy to adapt the learning rate manually')\r\n parser.add_argument('--lin-shift-rate', type=float, default=0.,\r\n help='linear shift rate')\r\n parser.add_argument('--unit-num', '-u', type=int, default=16,\r\n help='number of units in one layer of the models')\r\n parser.add_argument('--fp-out-dim', type=int, default=16,\r\n help='dimensionality of output of dynamic fingerprint')\r\n parser.add_argument('--fp-hidden-dim', type=int, default=16,\r\n help='dimensionality of hidden units in dynamic fingerprint')\r\n parser.add_argument('--fp-attention', type=bool, default=False,\r\n help='whether to use attention mechanism in dynamic fingerprint')\r\n parser.add_argument('--update-attention', type=bool, default=False,\r\n help='whether to use attention mechasnim in update')\r\n parser.add_argument('--concat-hidden', type=bool, default=False,\r\n help='whether to concatenate the hidden states in all graphconv layers')\r\n parser.add_argument('--fp-max-degree', type=int, default=6,\r\n help='max degrees of neural fingerprint')\r\n parser.add_argument('--weight-tying', type=str, default=True,\r\n help='whether to use the same parameters in all layers(Default: True)')\r\n parser.add_argument('--attention-tying', type=str, default=True,\r\n help='whether to use the same parameter in all attention(Default: True)')\r\n parser.add_argument('--fp-dropout-rate', type=float, default=0.0,\r\n help='dropout rate in graph convolutional neural network')\r\n parser.add_argument('--fp-bn', type=str, default='False',\r\n help='whether to use batch normalization in dynamic fingerprint')\r\n\r\n parser.add_argument('--attn', type=str, default=None, choices=attn_list,\r\n help='indicate the type of co-attention')\r\n\r\n parser.add_argument('--net-hidden-dims', type=str, default='32,16',\r\n help='dimensionality of hidden units in neural network for similarity prediction')\r\n parser.add_argument('--net-layer-num', type=int, default=2,\r\n help='number of layers in neural network for similarity prediction')\r\n parser.add_argument('--layer-aggregator', type=str, default='', choices=layer_aggregator_list,\r\n help='layer aggregator in dynamic fingerprint (Default: )')\r\n parser.add_argument('--seed', '-s', type=int, default=777,\r\n help='random seed value')\r\n parser.add_argument('--train-data-ratio', '-r', type=float, default=0.8,\r\n help='ratio of training data w.r.t the dataset')\r\n parser.add_argument('--protocol', type=int, default=2,\r\n help='pickle protocol version')\r\n parser.add_argument('--model-filename', type=str, default='classifier.pkl',\r\n help='saved models filename')\r\n parser.add_argument('--resume', type=str, default='',\r\n help='path to a trainer snapshot')\r\n parser.add_argument('--context', type=str, default='False',\r\n help='whether to use context embedding in dynamic fingerprint')\r\n parser.add_argument('--context-layers', type=int, default=1,\r\n help='number of context layers')\r\n parser.add_argument('--context-dropout', type=float, default=0.,\r\n help='dropout rate of context layers')\r\n parser.add_argument('--message-function', type=str, default='matrix_multiply',\r\n 
help='message function in dynamic fingerprint (default: matrix_multiply)')\r\n\r\n parser.add_argument('--readout-function', type=str, default='graph_level',\r\n help='readout function in dynamic fingerprint (default: graph_level)')\r\n parser.add_argument('--num-timesteps', type=int, default=3,\r\n help='number of timesteps in set2vec readout function')\r\n parser.add_argument('--num-output-hidden-layers', type=int, default=0,\r\n help='number of hidden layers in set2vec readout function')\r\n parser.add_argument('--output-hidden-dim', type=int, default=16,\r\n help='number of hidden units in each hidden layer in set2vec readout function')\r\n parser.add_argument('--output-activation', type=str, choices=['relu'],\r\n default='relu', help='activation function used in set2vec readout function')\r\n\r\n parser.add_argument('--multi-gpu', type=str, default='False',\r\n help='whether to use multiple GPUs')\r\n\r\n parser.add_argument('--augment', type=str, default='False',\r\n help='whether to use data augment')\r\n\r\n parser.add_argument('--max-norm', type=float, default=0.,\r\n help='the maximum value of gradient in back propagation')\r\n parser.add_argument('--l2-rate', type=float, default=0.,\r\n help='coefficient for the L2 regularization')\r\n parser.add_argument('--l1-rate', type=float, default=0.,\r\n help='coefficient for the L1 regularization')\r\n\r\n parser.add_argument('--loss-func', type=str, default='cross-entropy',\r\n help='loss function training the models')\r\n\r\n parser.add_argument('--symmetric', type=str, default=None,\r\n help='how to use symmetric in prediction')\r\n return parser.parse_args()\r\n\r\n\r\ndef modify_dataset_for_hinge(dataset):\r\n atoms1, adjs1, atoms2, adjs2, labels = dataset.get_datasets()\r\n labels_squeezed = np.squeeze(labels, axis=1)\r\n new_dataset = NumpyTupleDataset(atoms1, adjs1, atoms2, adjs2, labels_squeezed)\r\n return new_dataset\r\n\r\n\r\ndef main():\r\n # Parse the arguments.\r\n args = parse_arguments()\r\n augment = False if args.augment == 'False' else True\r\n multi_gpu = False if args.multi_gpu == 'False' else True\r\n if args.label:\r\n labels = args.label\r\n class_num = len(labels) if isinstance(labels, list) else 1\r\n else:\r\n raise ValueError('No target label was specified.')\r\n\r\n # Dataset preparation. 
Postprocessing is required for the regression task.\r\n def postprocess_label(label_list):\r\n label_arr = np.asarray(label_list, dtype=np.int32)\r\n return label_arr\r\n\r\n # Apply a preprocessor to the dataset.\r\n logging.info('Preprocess train dataset and test dataset...')\r\n preprocessor = preprocess_method_dict[args.method]()\r\n parser = CSVFileParserForPair(preprocessor, postprocess_label=postprocess_label,\r\n labels=labels, smiles_cols=['smiles_1', 'smiles_2'])\r\n train = parser.parse(args.train_datafile)['dataset']\r\n test = parser.parse(args.valid_datafile)['dataset']\r\n\r\n if augment:\r\n logging.info('Utilizing data augmentation in train set')\r\n train = augment_dataset(train)\r\n\r\n num_train = train.get_datasets()[0].shape[0]\r\n num_test = test.get_datasets()[0].shape[0]\r\n logging.info('Train/test split: {}/{}'.format(num_train, num_test))\r\n\r\n if len(args.net_hidden_dims):\r\n net_hidden_dims = tuple([int(net_hidden_dim) for net_hidden_dim in args.net_hidden_dims.split(',')])\r\n else:\r\n net_hidden_dims = ()\r\n\r\n weight_tying = False if args.weight_tying == 'False' else True\r\n fp_batch_normalization = True if args.fp_bn == 'True' else False\r\n\r\n predictor = set_up_predictor(method=args.method,\r\n fp_hidden_dim=args.fp_hidden_dim, fp_out_dim=args.fp_out_dim,\r\n conv_layers=args.conv_layers, concat_hidden=args.concat_hidden,\r\n fp_dropout_rate=args.fp_dropout_rate, fp_batch_normalization=fp_batch_normalization,\r\n net_hidden_dims=net_hidden_dims, class_num=class_num,\r\n sim_method=args.sim_method, weight_typing=weight_tying,\r\n symmetric=args.symmetric, attn_model=args.attn,\r\n )\r\n\r\n if args.train_pos_neg_ratio != -1.:\r\n # Set up the iterator.\r\n train_dataset = train.get_datasets()\r\n atoms1_train, adjs1_train, atoms2_train, adjs2_train, labels_train = train_dataset\r\n labels_train = np.squeeze(labels_train)\r\n train_dataset_arr = np.concatenate([item[:, None] if len(item.shape) == 1 else item for item in list(train_dataset)], axis=1)\r\n pos_train_dataset_arr = train_dataset_arr[labels_train == 1]\r\n num_pos_train = pos_train_dataset_arr.shape[0]\r\n pos_train_indices = np.arange(0, num_pos_train)\r\n neg_train_dataset_arr = train_dataset_arr[labels_train == 0]\r\n num_neg_train = neg_train_dataset_arr.shape[0]\r\n pos_neg_train_ratio = args.train_pos_neg_ratio\r\n num_pos_train = int(pos_neg_train_ratio * num_neg_train)\r\n np.random.seed(777)\r\n np.random.shuffle(pos_train_indices)\r\n pos_train_indices = pos_train_indices[:num_pos_train]\r\n pos_train_dataset_arr = pos_train_dataset_arr[pos_train_indices]\r\n new_train_dataset_arr = np.concatenate((pos_train_dataset_arr, neg_train_dataset_arr), axis=0)\r\n atoms1_train, adjs1_train = new_train_dataset_arr[:, 0], new_train_dataset_arr[:, 1]\r\n atoms2_train, adjs2_train = new_train_dataset_arr[:, 2], new_train_dataset_arr[:, 3]\r\n labels_train = new_train_dataset_arr[:, 4].astype(np.int32)\r\n labels_train = np.expand_dims(labels_train, axis=1)\r\n train = NumpyTupleDataset(atoms1_train, adjs1_train, atoms2_train, adjs2_train, labels_train)\r\n num_train = train.get_datasets()[0].shape[0]\r\n num_test = test.get_datasets()[0].shape[0]\r\n logging.info('Train pos-neg ratio is {:.4f}'.format(args.train_pos_neg_ratio))\r\n logging.info('Train/test number is {}/{}'.format(num_train, num_test))\r\n\r\n # if args.loss_func == 'hinge':\r\n # modify_dataset_for_hinge(train)\r\n # Set up the iterator.\r\n train_iter = SerialIterator(train, args.batchsize)\r\n test_iter = 
SerialIterator(test, args.batchsize,\r\n repeat=False, shuffle=False)\r\n\r\n metrics_fun = {'accuracy': F.binary_accuracy}\r\n loss_func = F.sigmoid_cross_entropy\r\n if args.loss_func == 'hinge':\r\n logging.info('Loss function is {}'.format(args.loss_func))\r\n loss_func = F.hinge\r\n metrics_fun = {'accuracy': F.accuracy}\r\n classifier = Classifier(predictor, lossfun=loss_func,\r\n metrics_fun=metrics_fun, device=args.gpu)\r\n\r\n # Set up the optimizer.\r\n optimizer = optimizers.Adam(alpha=args.learning_rate, weight_decay_rate=args.weight_decay_rate)\r\n # optimizer = optimizers.Adam()\r\n # optimizer = optimizers.SGD(lr=args.learning_rate)\r\n optimizer.setup(classifier)\r\n # add regularization\r\n if args.max_norm > 0:\r\n optimizer.add_hook(chainer.optimizer.GradientClipping(threshold=args.max_norm))\r\n if args.l2_rate > 0:\r\n optimizer.add_hook(chainer.optimizer.WeightDecay(rate=args.l2_rate))\r\n if args.l1_rate > 0:\r\n optimizer.add_hook(chainer.optimizer.Lasso(rate=args.l1_rate))\r\n\r\n # Set up the updater.\r\n if multi_gpu:\r\n logging.info('Using multiple GPUs')\r\n updater = training.ParallelUpdater(train_iter, optimizer, devices={'main': 0, 'second': 1},\r\n converter=concat_mols)\r\n else:\r\n logging.info('Using single GPU')\r\n updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu,\r\n converter=concat_mols)\r\n\r\n # Set up the trainer.\r\n logging.info('Training...')\r\n # add stop_trigger parameter\r\n early_stop = triggers.EarlyStoppingTrigger(monitor='validation/main/loss', patients=50, max_trigger=(500, 'epoch'))\r\n out = 'output' + '/' + args.out\r\n trainer = training.Trainer(updater, stop_trigger=early_stop, out=out)\r\n\r\n # trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)\r\n\r\n trainer.extend(E.Evaluator(test_iter, classifier,\r\n device=args.gpu, converter=concat_mols))\r\n\r\n train_eval_iter = SerialIterator(train, args.batchsize,\r\n repeat=False, shuffle=False)\r\n\r\n trainer.extend(AccuracyEvaluator(\r\n train_eval_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='train_acc',\r\n pos_labels=1, ignore_labels=-1, raise_value_error=False))\r\n # extension name='validation' is already used by `Evaluator`,\r\n # instead extension name `val` is used.\r\n trainer.extend(AccuracyEvaluator(\r\n test_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='val_acc',\r\n pos_labels=1, ignore_labels=-1))\r\n\r\n trainer.extend(ROCAUCEvaluator(\r\n train_eval_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='train_roc',\r\n pos_labels=1, ignore_labels=-1, raise_value_error=False))\r\n # extension name='validation' is already used by `Evaluator`,\r\n # instead extension name `val` is used.\r\n trainer.extend(ROCAUCEvaluator(\r\n test_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='val_roc',\r\n pos_labels=1, ignore_labels=-1))\r\n\r\n trainer.extend(PRCAUCEvaluator(\r\n train_eval_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='train_prc',\r\n pos_labels=1, ignore_labels=-1, raise_value_error=False))\r\n # extension name='validation' is already used by `Evaluator`,\r\n # instead extension name `val` is used.\r\n trainer.extend(PRCAUCEvaluator(\r\n test_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='val_prc',\r\n pos_labels=1, ignore_labels=-1))\r\n\r\n # 
trainer.extend(PrecisionEvaluator(\r\n # train_eval_iter, classifier, eval_func=predictor,\r\n # device=args.gpu, converter=concat_mols, name='train_p',\r\n # pos_labels=1, ignore_labels=-1, raise_value_error=False))\r\n # # extension name='validation' is already used by `Evaluator`,\r\n # # instead extension name `val` is used.\r\n # trainer.extend(PrecisionEvaluator(\r\n # val_iter, classifier, eval_func=predictor,\r\n # device=args.gpu, converter=concat_mols, name='val_p',\r\n # pos_labels=1, ignore_labels=-1))\r\n #\r\n # trainer.extend(RecallEvaluator(\r\n # train_eval_iter, classifier, eval_func=predictor,\r\n # device=args.gpu, converter=concat_mols, name='train_r',\r\n # pos_labels=1, ignore_labels=-1, raise_value_error=False))\r\n # # extension name='validation' is already used by `Evaluator`,\r\n # # instead extension name `val` is used.\r\n # trainer.extend(RecallEvaluator(\r\n # val_iter, classifier, eval_func=predictor,\r\n # device=args.gpu, converter=concat_mols, name='val_r',\r\n # pos_labels=1, ignore_labels=-1))\r\n\r\n trainer.extend(F1Evaluator(\r\n train_eval_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='train_f',\r\n pos_labels=1, ignore_labels=-1, raise_value_error=False))\r\n # extension name='validation' is already used by `Evaluator`,\r\n # instead extension name `val` is used.\r\n trainer.extend(F1Evaluator(\r\n test_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='val_f',\r\n pos_labels=1, ignore_labels=-1))\r\n\r\n # apply shift strategy to learning rate every 10 epochs\r\n # trainer.extend(E.ExponentialShift('alpha', args.exp_shift_rate), trigger=(10, 'epoch'))\r\n if args.exp_shift_strategy == 1:\r\n trainer.extend(E.ExponentialShift('alpha', args.exp_shift_rate),\r\n trigger=triggers.ManualScheduleTrigger([10, 20, 30, 40, 50, 60], 'epoch'))\r\n elif args.exp_shift_strategy == 2:\r\n trainer.extend(E.ExponentialShift('alpha', args.exp_shift_rate),\r\n trigger=triggers.ManualScheduleTrigger([5, 10, 15, 20, 25, 30], 'epoch'))\r\n elif args.exp_shift_strategy == 3:\r\n trainer.extend(E.ExponentialShift('alpha', args.exp_shift_rate),\r\n trigger=triggers.ManualScheduleTrigger([5, 10, 15, 20, 25, 30, 40, 50, 60, 70], 'epoch'))\r\n else:\r\n raise ValueError('No such strategy to adapt learning rate')\r\n # # observation of learning rate\r\n trainer.extend(E.observe_lr(), trigger=(1, 'iteration'))\r\n\r\n entries = [\r\n 'epoch',\r\n 'main/loss', 'train_acc/main/accuracy', 'train_roc/main/roc_auc', 'train_prc/main/prc_auc',\r\n # 'train_p/main/precision', 'train_r/main/recall',\r\n 'train_f/main/f1',\r\n 'validation/main/loss', 'val_acc/main/accuracy', 'val_roc/main/roc_auc', 'val_prc/main/prc_auc',\r\n # 'val_p/main/precision', 'val_r/main/recall',\r\n 'val_f/main/f1',\r\n 'lr',\r\n 'elapsed_time']\r\n trainer.extend(E.PrintReport(entries=entries))\r\n # change from 10 to 2 on Mar. 
1 2019\r\n trainer.extend(E.snapshot(), trigger=(2, 'epoch'))\r\n trainer.extend(E.LogReport())\r\n trainer.extend(E.ProgressBar())\r\n trainer.extend(E.PlotReport(['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png'))\r\n trainer.extend(E.PlotReport(['train_acc/main/accuracy', 'val_acc/main/accuracy'], 'epoch', file_name='accuracy.png'))\r\n\r\n if args.resume:\r\n resume_path = os.path.join(out, args.resume)\r\n logging.info('Resume training according to snapshot in {}'.format(resume_path))\r\n chainer.serializers.load_npz(resume_path, trainer)\r\n\r\n trainer.run()\r\n\r\n # Save the regressor's parameters.\r\n model_path = os.path.join(out, args.model_filename)\r\n logging.info('Saving the trained models to {}...'.format(model_path))\r\n classifier.save_pickle(model_path, protocol=args.protocol)\r\n\r\n\r\nif __name__ == '__main__':\r\n logging.info(ROOT_PATH)\r\n\r\n main()\r\n"
] | [
[
"numpy.random.shuffle",
"numpy.squeeze",
"numpy.random.seed",
"numpy.asarray",
"numpy.arange",
"numpy.expand_dims",
"matplotlib.use",
"numpy.concatenate"
]
] |
mhannani/ZinVert | [
"d54e1ab1980ed70945c34d2ceb294d559126f623"
] | [
"src/utils/create_seq2seq.py"
] | [
"import torch.nn as nn\nfrom torch.optim import Adam\nfrom src.models.Seq2seq import Seq2Seq\nfrom src.models.Decoder import Decoder, OneStepDecoder, OneStepDecoderWithAttention, DecoderWithAttention\nfrom src.models.Encoder import Encoder, EncoderAttention\nfrom src.models.Attention import Attention\nfrom src.data.config import *\n\n\ndef create_seq2seq(src_vocab, tgt_vocab):\n \"\"\"\n Creates encoder, decoder, defines optimizer, and loss function.\n :param src_vocab: torchtext.vocab.vocab.Vocab\n source language vocabulary\n :param tgt_vocab: torchtext.vocab.vocab.Vocab\n target language vocabulary\n :return: model, optimizer, criterion\n see : https://datascience.stackexchange.com/questions/10250/what-is-the-difference-between-objective-error-criterion-cost-loss-fun/10263\n \"\"\"\n\n # vocabularies size\n src_vocab__len = len(src_vocab)\n tgt_vocab__len = len(tgt_vocab)\n\n # encoder model\n encoder = Encoder(src_vocab__len, EMBEDDING_SIZE, HIDDEN_DIM, N_LAYERS, DROPOUT)\n\n # one step decoder model\n one_step_decoder = OneStepDecoder(tgt_vocab__len, EMBEDDING_SIZE, HIDDEN_DIM)\n\n # decoder model\n decoder = Decoder(one_step_decoder, device=DEVICE)\n\n # encoder -> decoder\n seq2seq = Seq2Seq(encoder, decoder)\n\n # move the model to device\n seq2seq.to(DEVICE)\n\n # Adam optimizer\n optimizer = Adam(seq2seq.parameters())\n\n # ignore padding indices\n # TGT_PAD_IDX = tgt_vocab.lookup_indices([SPECIAL_SYMBOLS[PAD_IDX]])[0]\n TGT_PAD_IDX = 1\n\n # loss function\n criterion = nn.CrossEntropyLoss(ignore_index=TGT_PAD_IDX)\n\n return seq2seq, optimizer, criterion\n\n\ndef create_seq2seq_with_att(src_vocab, tgt_vocab):\n \"\"\"\n Creates encoder, decoder, defines optimizer, and loss function with the attention mechanism\n :param src_vocab: torchtext.vocab.vocab.Vocab\n source language vocabulary\n :param tgt_vocab: torchtext.vocab.vocab.Vocab\n target language vocabulary\n :return: model, optimizer, criterion\n see : https://datascience.stackexchange.com/questions/10250/what-is-the-difference-between-objective-error-criterion-cost-loss-fun/10263\n \"\"\"\n\n # vocabularies size\n src_vocab__len = len(src_vocab.vocab)\n tgt_vocab__len = len(tgt_vocab.vocab)\n\n # encoder model\n encoder = EncoderAttention(src_vocab__len, EMBEDDING_SIZE, HIDDEN_DIM, N_LAYERS, DROPOUT)\n\n # attention model\n attention = Attention(HIDDEN_DIM, HIDDEN_DIM)\n\n # one step decoder model\n one_step_decoder = OneStepDecoderWithAttention(tgt_vocab__len, EMBEDDING_SIZE, HIDDEN_DIM, HIDDEN_DIM, attention)\n\n # decoder model\n decoder = DecoderWithAttention(one_step_decoder, device='cpu')\n\n # encoder -> decoder\n seq2seq = Seq2Seq(encoder, decoder)\n\n # move the model to device\n seq2seq.to('cpu')\n\n # Adam optimizer\n optimizer = Adam(seq2seq.parameters())\n\n # ignore padding indices\n # TGT_PAD_IDX = tgt_vocab.lookup_indices([SPECIAL_SYMBOLS[PAD_IDX]])[0]\n TGT_PAD_IDX = 1\n\n # loss function\n criterion = nn.CrossEntropyLoss(ignore_index=TGT_PAD_IDX)\n\n return seq2seq, optimizer, criterion\n\n\n\n\n\n\n\n\n"
] | [
[
"torch.nn.CrossEntropyLoss"
]
] |
bhlarson/EmbeddedClassification | [
"68ede2d08c9e110c37ebb5c31a5e4c5d1abc52f2"
] | [
"infer_imdb_tfl.py"
] | [
"\n\n\"\"\"Train a Resnet model for age classification and gender regression from the imdb dataset.\"\"\"\n#from __future__ import absolute_import\n#from __future__ import division\n#from __future__ import print_function\n\nimport argparse\nimport os\nimport sys\nimport shutil\nimport glob\nimport cv2\nimport numpy as np\nimport datetime\n\nUSE_TFL = False\nif USE_TFL:\n import tflite_runtime.interpreter as tflite\nelse:\n import tensorflow as tf\n\nprint('Python Version {}'.format(sys.version))\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--debug', action='store_true', help='Wait for debugger attach')\nparser.add_argument('--model', type=str, default='./tflite/1589806577_int8.tflite', help='Model path')\n\nparser.add_argument('--data_dir', type=str, \n #default='/home/mendel/data/imdb',\n default='/store/Datasets/imdb/imdb_crop/18',\n help='Path to the directory containing the imdb data tf record.')\n\nparser.add_argument('--match', type=str, default='*',\n help='File wildcard')\n\n\n_HEIGHT = 200\n_WIDTH = 200\n_DEPTH = 3\n\n\ndef get_filenames(data_dir, ext):\n \"\"\"Return a list of filenames.\n\n Args:\n is_training: A boolean denoting whether the input is for training.\n data_dir: path to the the directory containing the input data.\n\n Returns:\n A list of file names.\n \"\"\"\n return glob.glob(os.path.join(data_dir, ext))\n\ndef build_engine(FLAGS):\n uff_model = uff.from_tensorflow_frozen_model(FLAGS.model, debug_mode=True, return_graph_info=True)\n with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:\n builder.max_workspace_size = 1 << 30\n builder.fp16_mode = True\n builder.max_batch_size = 1\n parser.register_input(\"Input\", (3, _WIDTH, _HEIGHT))\n parser.register_output(\"MarkOutput_0\")\n parser.parse(uff_model_path, network)\n \n print(\"Building TensorRT engine, this may take a few minutes...\")\n trt_engine = builder.build_cuda_engine(network)\n \n\ndef main(FLAGS):\n\n if USE_TFL:\n interpreter = tflite.Interpreter(model_path=FLAGS.model)\n else:\n interpreter = tf.lite.Interpreter(model_path=FLAGS.model)\n \n interpreter.allocate_tensors()\n # Get input and output tensors.\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details() \n\n #loaded = tf.saved_model.load(FLAGS.model)\n #print(list(loaded.signatures.keys()))\n #infer = loaded.signatures[\"serving_default\"]\n #print(infer.structured_outputs)\n #print (infer.inputs[0])\n imgs = get_filenames(FLAGS.data_dir, FLAGS.match)\n\n img = cv2.imread(imgs[0])\n imgShape = img.shape\n center = np.array([imgShape[1]/2, imgShape[0]/2])\n d = np.array([_HEIGHT/2,_WIDTH/2])\n p1 = tuple((center-d).astype(int))\n p1 = (max(p1[0],0),max(p1[1],0))\n p2 = tuple((center+d).astype(int))\n p2 = (min(p2[0],imgShape[0]-1),min(p2[1],imgShape[1]-1))\n crop = cv2.resize(img[p1[1]:p2[1], p1[0]:p2[0]],(_WIDTH,_HEIGHT))\n interpreter.set_tensor(input_details[0]['index'], crop.astype(np.float32))\n interpreter.invoke()\n\n # The function `get_tensor()` returns a copy of the tensor data.\n # Use `tensor()` in order to get a pointer to the tensor.\n age = interpreter.get_tensor(output_details[0]['index'])[0][0]\n\n gender = 'male'\n if(interpreter.get_tensor(output_details[1]['index'])[0] < 1):\n gender = 'female'\n\n print('{}:{}, {}:{}'.format(output_details[0]['name'], age, output_details[1]['name'],gender))\n\n start_time = datetime.datetime.now()\n for i, imfile in enumerate(imgs):\n img = cv2.imread(imfile)\n imgShape = 
img.shape\n center = np.array([imgShape[1]/2, imgShape[0]/2])\n d = np.array([_HEIGHT/2,_WIDTH/2])\n p1 = tuple((center-d).astype(int))\n p1 = (max(p1[0],0),max(p1[1],0))\n p2 = tuple((center+d).astype(int))\n p2 = (min(p2[0],imgShape[0]-1),min(p2[1],imgShape[1]-1))\n crop = cv2.resize(img[p1[1]:p2[1], p1[0]:p2[0]],(_WIDTH,_HEIGHT))\n interpreter.set_tensor(input_details[0]['index'], crop.astype(np.float32))\n interpreter.invoke()\n\n\n age = interpreter.get_tensor(output_details[0]['index'])[0][0]\n\n gender = 'male'\n if(interpreter.get_tensor(output_details[1]['index'])[0] < 1):\n gender = 'female'\n\n print('{}:{}, {}:{} file {}'.format(output_details[0]['name'], age, output_details[1]['name'],gender, imfile))\n\n analysis_done = datetime.datetime.now()\n total_time = (analysis_done-start_time).total_seconds()\n\n print('average image time {}'.format(total_time/len(imgs)))\n\nif __name__ == '__main__':\n FLAGS, unparsed = parser.parse_known_args()\n\n if FLAGS.debug:\n # https://code.visualstudio.com/docs/python/debugging#_remote-debugging\n # Launch applicaiton on remote computer: \n # > python3 -m ptvsd --host 0.0.0.0 --port 3000 --wait predict_imdb.py\n import ptvsd\n # Allow other computers to attach to ptvsd at this IP address and port.\n ptvsd.enable_attach(address=('0.0.0.0', 3000), redirect_output=True)\n # Pause the program until a remote debugger is attached\n print(\"Wait for debugger attach\")\n ptvsd.wait_for_attach()\n print(\"Debugger Attached\")\n\n main(FLAGS)\n print('complete')\n"
] | [
[
"numpy.array",
"tensorflow.lite.Interpreter"
]
] |
b-fontana/law | [
"8fca50fc1aa54647e4abd6dec4ff5d8ac2622865"
] | [
"law/contrib/keras/formatter.py"
] | [
"# coding: utf-8\n\n\"\"\"\nKeras target formatters.\n\"\"\"\n\n\n__all__ = [\"KerasModelFormatter\", \"TFKerasModelFormatter\"]\n\n\nfrom law.target.formatter import Formatter\nfrom law.target.file import get_path\n\n\nclass ModelFormatter(Formatter):\n\n @classmethod\n def accepts(cls, path):\n return get_path(path).endswith(\".h5\")\n\n @classmethod\n def dump(cls, path, model, *args, **kwargs):\n model.save(path, *args, **kwargs)\n\n\nclass KerasModelFormatter(ModelFormatter):\n\n name = \"keras\"\n\n @classmethod\n def load(cls, path, *args, **kwargs):\n from keras.models import load_model\n return load_model(path, *args, **kwargs)\n\n\nclass TFKerasModelFormatter(ModelFormatter):\n\n name = \"tf_keras\"\n\n @classmethod\n def load(cls, path, *args, **kwargs):\n from tensorflow import keras\n return keras.models.load_model(path, *args, **kwargs)\n"
] | [
[
"tensorflow.keras.models.load_model"
]
] |